diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -521,6 +521,25 @@
                      LLVMMatchType<1>]),
                 [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For indexed segment load
+  // Input: (pointer, index, vl)
+  class RISCVISegLoad<int nf>
+        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
+                                !add(nf, -1))),
+                    [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty],
+                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For indexed segment load with mask
+  // Input: (maskedoff, pointer, index, mask, vl)
+  class RISCVISegLoadMask<int nf>
+        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
+                                !add(nf, -1))),
+                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
+                                [LLVMPointerToElt<0>,
+                                 llvm_anyvector_ty,
+                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                 llvm_anyint_ty]),
+                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
   // For unit stride segment store
   // Input: (value, pointer, vl)
   class RISCVUSSegStore<int nf>
@@ -670,6 +689,10 @@
     def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMask<nf>;
   }
+  multiclass RISCVISegLoad<int nf> {
+    def "int_riscv_" # NAME : RISCVISegLoad<nf>;
+    def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMask<nf>;
+  }
   multiclass RISCVUSSegStore<int nf> {
     def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
   }
@@ -972,6 +995,8 @@
   foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
     defm vlseg # nf : RISCVUSSegLoad<nf>;
     defm vlsseg # nf : RISCVSSegLoad<nf>;
+    // TODO: In v1.0, it should be vloxseg.
+    defm vlxseg # nf : RISCVISegLoad<nf>;
     defm vsseg # nf : RISCVUSSegStore<nf>;
     defm vssseg # nf : RISCVSSegStore<nf>;
   }
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -57,6 +57,8 @@
   void selectVLSEG(SDNode *Node, unsigned IntNo, bool IsStrided);
   void selectVLSEGMask(SDNode *Node, unsigned IntNo, bool IsStrided);
+  void selectVLXSEG(SDNode *Node, unsigned IntNo);
+  void selectVLXSEGMask(SDNode *Node, unsigned IntNo);
   void selectVSSEG(SDNode *Node, unsigned IntNo, bool IsStrided);
   void selectVSSEGMask(SDNode *Node, unsigned IntNo, bool IsStrided);
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -169,7 +169,8 @@
   Operands.push_back(SEW);
   Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
-      IntNo, ScalarSize, static_cast<unsigned>(LMUL));
+      IntNo, ScalarSize, static_cast<unsigned>(LMUL),
+      static_cast<unsigned>(RISCVVLMUL::LMUL_1));
   SDNode *Load =
       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
   SDValue SuperReg = SDValue(Load, 0);
@@ -207,7 +208,79 @@
   Operands.push_back(SEW);
   Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
-      IntNo, ScalarSize, static_cast<unsigned>(LMUL));
+      IntNo, ScalarSize, static_cast<unsigned>(LMUL),
+      static_cast<unsigned>(RISCVVLMUL::LMUL_1));
+  SDNode *Load =
+      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
+  SDValue SuperReg = SDValue(Load, 0);
+  for (unsigned I = 0; I < NF; ++I)
+    ReplaceUses(SDValue(Node, I),
+                CurDAG->getTargetExtractSubreg(getSubregIndexByEVT(VT, I), DL,
+                                               VT, SuperReg));
+
+  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
+  CurDAG->RemoveDeadNode(Node);
+}
+
+void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned IntNo) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumValues() - 1;
+  EVT VT = Node->getValueType(0);
+  unsigned ScalarSize = VT.getScalarSizeInBits();
+  MVT XLenVT = Subtarget->getXLenVT();
+  RISCVVLMUL LMUL = getLMUL(VT);
+  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+  SDValue Operands[] = {
+      Node->getOperand(2),     // Base pointer.
+      Node->getOperand(3),     // Index.
+      Node->getOperand(4),     // VL.
+      SEW, Node->getOperand(0) // Chain.
+  };
+
+  EVT IndexVT = Node->getOperand(3)->getValueType(0);
+  RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
+  unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
+  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
+      IntNo, IndexScalarSize, static_cast<unsigned>(LMUL),
+      static_cast<unsigned>(IndexLMUL));
+  SDNode *Load =
+      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
+  SDValue SuperReg = SDValue(Load, 0);
+  for (unsigned I = 0; I < NF; ++I)
+    ReplaceUses(SDValue(Node, I),
+                CurDAG->getTargetExtractSubreg(getSubregIndexByEVT(VT, I), DL,
+                                               VT, SuperReg));
+
+  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
+  CurDAG->RemoveDeadNode(Node);
+}
+
+void RISCVDAGToDAGISel::selectVLXSEGMask(SDNode *Node, unsigned IntNo) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumValues() - 1;
+  EVT VT = Node->getValueType(0);
+  unsigned ScalarSize = VT.getScalarSizeInBits();
+  MVT XLenVT = Subtarget->getXLenVT();
+  RISCVVLMUL LMUL = getLMUL(VT);
+  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
+  SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
+  SDValue Operands[] = {
+      MaskedOff,
+      Node->getOperand(NF + 2), // Base pointer.
+      Node->getOperand(NF + 3), // Index.
+      Node->getOperand(NF + 4), // Mask.
+      Node->getOperand(NF + 5), // VL.
+      SEW,
+      Node->getOperand(0) // Chain.
+  };
+
+  EVT IndexVT = Node->getOperand(NF + 3)->getValueType(0);
+  RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
+  unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
+  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
+      IntNo, IndexScalarSize, static_cast<unsigned>(LMUL),
+      static_cast<unsigned>(IndexLMUL));
   SDNode *Load =
       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
   SDValue SuperReg = SDValue(Load, 0);
@@ -245,7 +318,8 @@
   Operands.push_back(SEW);
   Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
-      IntNo, ScalarSize, static_cast<unsigned>(LMUL));
+      IntNo, ScalarSize, static_cast<unsigned>(LMUL),
+      static_cast<unsigned>(RISCVVLMUL::LMUL_1));
   SDNode *Store =
       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
   ReplaceNode(Node, Store);
@@ -278,7 +352,8 @@
   Operands.push_back(SEW);
   Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
-      IntNo, ScalarSize, static_cast<unsigned>(LMUL));
+      IntNo, ScalarSize, static_cast<unsigned>(LMUL),
+      static_cast<unsigned>(RISCVVLMUL::LMUL_1));
   SDNode *Store =
       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
   ReplaceNode(Node, Store);
@@ -446,6 +521,26 @@
       selectVLSEGMask(Node, IntNo, /*IsStrided=*/true);
       return;
     }
+    case Intrinsic::riscv_vlxseg2:
+    case Intrinsic::riscv_vlxseg3:
+    case Intrinsic::riscv_vlxseg4:
+    case Intrinsic::riscv_vlxseg5:
+    case Intrinsic::riscv_vlxseg6:
+    case Intrinsic::riscv_vlxseg7:
+    case Intrinsic::riscv_vlxseg8: {
+      selectVLXSEG(Node, IntNo);
+      return;
+    }
+    case Intrinsic::riscv_vlxseg2_mask:
+    case Intrinsic::riscv_vlxseg3_mask:
+    case Intrinsic::riscv_vlxseg4_mask:
+    case Intrinsic::riscv_vlxseg5_mask:
+    case Intrinsic::riscv_vlxseg6_mask:
+    case Intrinsic::riscv_vlxseg7_mask:
+    case Intrinsic::riscv_vlxseg8_mask: {
+      selectVLXSEGMask(Node, IntNo);
+      return;
+    }
     }
     break;
   }
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -325,6 +325,7 @@
   unsigned int IntrinsicID;
   unsigned int SEW;
   unsigned int LMUL;
+  unsigned int IndexLMUL;
   unsigned int Pseudo;
 };
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -400,17 +400,18 @@
   let PrimaryKeyName = "getRISCVVIntrinsicInfo";
 }
 
-class RISCVZvlsseg<string IntrName, bits<11> S, bits<3> L> {
+class RISCVZvlsseg<string IntrName, bits<11> S, bits<3> L, bits<3> IL = V_M1.value> {
   Intrinsic IntrinsicID = !cast<Intrinsic>(IntrName);
   bits<11> SEW = S;
   bits<3> LMUL = L;
+  bits<3> IndexLMUL = IL;
   Pseudo Pseudo = !cast<Pseudo>(NAME);
 }
 
 def RISCVZvlssegTable : GenericTable {
   let FilterClass = "RISCVZvlsseg";
-  let Fields = ["IntrinsicID", "SEW", "LMUL", "Pseudo"];
-  let PrimaryKey = ["IntrinsicID", "SEW", "LMUL"];
+  let Fields = ["IntrinsicID", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
+  let PrimaryKey = ["IntrinsicID", "SEW", "LMUL", "IndexLMUL"];
   let PrimaryKeyName = "getPseudo";
 }
@@ -441,7 +442,8 @@
   string L = !subst("VLSEG", "vlseg",
              !subst("VLSSEG", "vlsseg",
              !subst("VSSEG", "vsseg",
-             !subst("VSSSEG", "vssseg", Upper))));
+             !subst("VSSSEG", "vssseg",
+             !subst("VLXSEG", "vlxseg", Upper)))));
 }
 
 // Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
@@ -453,7 +455,11 @@
                     !subst("E16", "",
                     !subst("E32", "",
                     !subst("E64", "",
-                    !subst("_V", "", PseudoToVInst<PseudoInst>.VInst)))))>.L,
+                    !subst("EI8", "",
+                    !subst("EI16", "",
+                    !subst("EI32", "",
+                    !subst("EI64", "",
+                    !subst("_V", "", PseudoToVInst<PseudoInst>.VInst)))))))))>.L,
                     !if(IsMasked, "_mask", ""));
 }
@@ -1045,6 +1051,40 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, bits<11> EEW, bits<3> LMUL>:
+      Pseudo<(outs RetClass:$rd),
+             (ins GPR:$rs1, IdxClass:$offset, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul, LMUL> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, bits<11> EEW, bits<3> LMUL>:
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
+                  IdxClass:$offset, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul, LMUL> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = "$rd = $merge";
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoUSSegStoreNoMask<VReg ValClass, bits<11> EEW>:
       Pseudo<(outs),
              (ins ValClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
@@ -1647,6 +1687,27 @@
   }
 }
 
+multiclass VPseudoISegLoad {
+  foreach idx_eew = EEWList in { // EEW for index argument.
+    foreach idx_lmul = MxSet<idx_eew>.m in { // LMUL for index argument.
+      foreach val_lmul = MxList.m in { // LMUL for the value.
+        defvar IdxLInfo = idx_lmul.MX;
+        defvar IdxVreg = idx_lmul.vrclass;
+        defvar ValLInfo = val_lmul.MX;
+        let VLMul = val_lmul.value in {
+          foreach nf = NFSet<val_lmul>.L in {
+            defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
+            def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
+              VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
+            def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
+              VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
+          }
+        }
+      }
+    }
+  }
+}
+
 multiclass VPseudoUSSegStore {
   foreach eew = EEWList in {
     foreach lmul = MxSet<eew>.m in {
@@ -2877,6 +2938,8 @@
 //===----------------------------------------------------------------------===//
 defm PseudoVLSEG : VPseudoUSSegLoad;
 defm PseudoVLSSEG : VPseudoSSegLoad;
+// TODO: In v1.0, it should be PseudoVLOXSEG.
+defm PseudoVLXSEG : VPseudoISegLoad;
 defm PseudoVSSEG : VPseudoUSSegStore;
 defm PseudoVSSSEG : VPseudoSSegStore;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlxseg-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlxseg-rv32.ll
@@ -0,0 +1,633 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv16i16(double*, <vscale x 16 x i16>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv1i8(double*, <vscale x 1 x i8>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv16i8(double*, <vscale x 16 x i8>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv2i32(double*, <vscale x 2 x i32>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv4i16(double*, <vscale x 4 x i16>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv32i16(double*, <vscale x 32 x i16>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv1i32(double*, <vscale x 1 x i32>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv8i16(double*, <vscale x 8 x i16>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv8i8(double*, <vscale x 8 x i8>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv8i32(double*, <vscale x 8 x i32>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv64i8(double*, <vscale x 64 x i8>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv4i8(double*, <vscale x 4 x i8>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv1i16(double*, <vscale x 1 x i16>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv32i8(double*, <vscale x 32 x i8>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv2i8(double*, <vscale x 2 x i8>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv16i32(double*, <vscale x 16 x i32>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv2i16(double*, <vscale x 2 x i16>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv4i32(double*, <vscale x 4 x i32>, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlxseg3_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg3_mask_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlxseg-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlxseg-rv64.ll
@@ -0,0 +1,112558 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv16i16(i16*, <vscale x 16 x i16>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i16>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv32i16(i16*, <vscale x 32 x i16>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv32i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 32 x i16>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv32i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv4i32(i16*, <vscale x 4 x i32>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv4i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i32>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv4i32(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv16i8(i16*, <vscale x 16 x i8>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i8>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv1i64(i16*, <vscale x 1 x i64>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv1i64(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i64>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv1i64(i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv1i64(i16* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv1i64(i16* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv1i64(i16* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv1i64(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 1 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv1i32(i16*, <vscale x 1 x i32>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv1i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i32>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv1i32(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv8i16(i16*, <vscale x 8 x i16>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv8i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i16>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv8i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv4i8(i16*, <vscale x 4 x i8>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv4i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i8>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv4i8(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv1i16(i16*, <vscale x 1 x i16>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv1i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i16>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv1i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv2i32(i16*, <vscale x 2 x i32>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv2i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i32>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv2i32(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv8i8(i16*, <vscale x 8 x i8>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv8i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i8>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv8i8(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv4i64(i16*, <vscale x 4 x i64>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv4i64(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i64>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv4i64(i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv4i64(i16* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv4i64(i16* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv4i64(i16* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv4i64(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 4 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv64i8(i16*, <vscale x 64 x i8>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv64i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 64 x i8>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv64i8(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv4i16(i16*, <vscale x 4 x i16>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv4i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i16>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv4i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv8i64(i16*, <vscale x 8 x i64>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv8i64(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i64>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv8i64(i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv8i64(i16* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv8i64(i16* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv8i64(i16* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv8i64(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 8 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv1i8(i16*, <vscale x 1 x i8>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv1i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i8>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv1i8(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv2i8(i16*, <vscale x 2 x i8>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv2i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i8>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv2i8(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv8i32(i16*, <vscale x 8 x i32>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv8i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i32>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv32i8(i16*, <vscale x 32 x i8>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv32i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 32 x i8>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv32i8(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv16i32(i16*, <vscale x 16 x i32>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i32>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv2i16(i16*, <vscale x 2 x i16>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv2i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i16>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv2i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv2i64(i16*, <vscale x 2 x i64>, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv2i64(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i64>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlxseg2_nxv16i16_nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv16i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlxseg2_mask_nxv16i16_nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.nxv16i16.nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlxseg2.mask.nxv16i16.nxv2i64(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 2 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.nxv4i32.nxv16i16(i32*, <vscale x 16 x i16>, i64)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv16i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i32> @test_vlxseg2_nxv4i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.nxv4i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlxseg2_mask_nxv4i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.nxv4i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv16i16(<vscale x 4 x i32> %1, <vscale x 4 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+  ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.nxv4i32.nxv32i16(i32*, <vscale x 32 x i16>, i64)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv32i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i32> @test_vlxseg2_nxv4i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.nxv4i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlxseg2_mask_nxv4i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.nxv4i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv32i16(<vscale x 4 x i32> %1, <vscale x 4 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+  ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.nxv4i32.nxv4i32(i32*, <vscale x 4 x i32>, i64)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i32> @test_vlxseg2_nxv4i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.nxv4i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlxseg2_mask_nxv4i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.nxv4i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %1, <vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+  ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.nxv4i32.nxv16i8(i32*, <vscale x 16 x i8>, i64)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv16i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i32> @test_vlxseg2_nxv4i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.nxv4i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlxseg2_mask_nxv4i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.nxv4i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv16i8(<vscale x 4 x i32> %1, <vscale x 4 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+  ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.nxv4i32.nxv1i64(i32*, <vscale x 1 x i64>, i64)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv1i64(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i32> @test_vlxseg2_nxv4i32_nxv1i64(i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: 
test_vlxseg2_nxv4i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv1i64( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv1i32(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv1i32(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv1i32( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv8i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv8i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv8i16( 
%1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv4i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv4i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv1i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv1i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv1i16( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv2i32(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv2i32(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, 
e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv2i32( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv8i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv8i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv8i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv4i64(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv4i64(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv4i64( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv64i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv64i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define 
@test_vlxseg2_mask_nxv4i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv64i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv4i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv4i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv4i16( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv8i64(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv8i64(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv8i64( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv1i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv1i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv1i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv2i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv2i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv2i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv8i32(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv8i32(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv8i32( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + 
+declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv32i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv32i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv32i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv16i32(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv16i32(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv16i32( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv2i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv2i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; 
CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv2i16( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i32.nxv2i64(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv2i64(,, i32*, , , i64) + +define @test_vlxseg2_nxv4i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i32.nxv2i64( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv16i16(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv16i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv16i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv32i16(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv32i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret 
%1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv32i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv4i32(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv4i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv16i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv16i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv16i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv16i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv1i64(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv1i64(,,, i32*, , , i64) + 
+define @test_vlxseg3_nxv4i32_nxv1i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv1i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv1i32(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv1i32(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv1i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv8i16(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv8i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: 
vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv8i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv4i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv4i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv1i16(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv1i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv1i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv2i32(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv2i32(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = 
extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv2i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv8i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv8i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv8i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv4i64(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv4i64(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv4i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv64i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv64i8(,,, 
i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv64i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv4i16(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv4i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv8i64(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv8i64(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; 
CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv8i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv1i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv1i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv1i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv2i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv2i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv2i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv8i32(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv8i32(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = 
extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv8i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv32i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv32i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv32i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv16i32(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv16i32(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv16i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv2i16(i32*, , i64) +declare {,,} 
@llvm.riscv.vlxseg3.mask.nxv4i32.nxv2i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv2i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv2i64(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv2i64(,,, i32*, , , i64) + +define @test_vlxseg3_nxv4i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i32.nxv2i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv16i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv16i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv16i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv32i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv32i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv32i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv4i32(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv4i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv16i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv16i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv16i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; 
CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv16i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv1i64(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv1i64(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv1i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv1i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv1i32(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv1i32(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call 
{,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv1i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv8i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv8i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv8i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv4i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv4i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv1i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv1i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} 
@llvm.riscv.vlxseg4.nxv4i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv1i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv2i32(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv2i32(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv2i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv8i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv8i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv8i8( %1, %1, %1, 
%1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv4i64(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv4i64(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv4i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv64i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv64i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv64i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv4i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv4i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv8i64(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv8i64(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv8i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv1i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv1i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv1i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv2i8(i32*, , i64) +declare {,,,} 
@llvm.riscv.vlxseg4.mask.nxv4i32.nxv2i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv2i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv8i32(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv8i32(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv8i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv32i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv32i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: 
vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv32i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv16i32(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv16i32(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv16i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv2i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv2i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv2i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv2i64(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv2i64(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv4i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i32_nxv2i64: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i32.nxv2i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv16i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv32i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv32i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail 
call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv32i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv4i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv4i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv4i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv16i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv16i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv1i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv1i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; 
CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv1i64( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv1i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv1i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv1i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv8i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv8i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv8i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv4i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv4i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define 
@test_vlxseg2_mask_nxv16i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv4i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv1i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv1i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv1i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv2i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv2i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv2i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv8i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv8i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; 
CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv8i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv4i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv4i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv4i64( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv64i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv64i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv64i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv4i16(i8*, , 
i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv4i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv8i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv8i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv8i64( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv1i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv1i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} 
@llvm.riscv.vlxseg2.nxv16i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv2i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv2i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv2i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv8i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv8i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv32i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv32i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv32i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv16i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv16i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv2i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv2i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16i8.nxv2i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv2i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv16i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} 
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlxseg2_mask_nxv16i8_nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv16i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg2.nxv16i8.nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg2.mask.nxv16i8.nxv2i64(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 2 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv16i16(i8*, <vscale x 16 x i16>, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlxseg3_nxv16i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlxseg3_mask_nxv16i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv32i16(i8*, <vscale x 32 x i16>, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv32i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i16>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlxseg3_nxv16i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlxseg3_mask_nxv16i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv32i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv4i32(i8*, <vscale x 4 x i32>, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv4i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i32>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlxseg3_nxv16i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlxseg3_mask_nxv16i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv4i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv16i8(i8*, <vscale x 16 x i8>, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlxseg3_nxv16i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlxseg3_mask_nxv16i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv1i64(i8*, <vscale x 1 x i64>, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv1i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i64>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlxseg3_nxv16i8_nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlxseg3_mask_nxv16i8_nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv1i64(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 1 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv1i32(i8*, <vscale x 1 x i32>, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv1i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i32>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlxseg3_nxv16i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlxseg3_mask_nxv16i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv1i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv8i16(i8*, <vscale x 8 x i16>, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv8i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i16>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlxseg3_nxv16i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlxseg3_mask_nxv16i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv8i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv4i8(i8*, <vscale x 4 x i8>, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv4i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i8>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlxseg3_nxv16i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlxseg3_mask_nxv16i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv4i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv1i16(i8*, <vscale x 1 x i16>, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv1i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i16>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlxseg3_nxv16i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlxseg3_mask_nxv16i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv1i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv2i32(i8*, <vscale x 2 x i32>, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv2i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i32>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlxseg3_nxv16i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlxseg3_mask_nxv16i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv2i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.nxv16i8.nxv8i8(i8*, <vscale x 8 x i8>, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv8i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i8>, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8>
@test_vlxseg3_nxv16i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv16i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv8i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv4i64(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv4i64(,,, i8*, , , i64) + +define @test_vlxseg3_nxv16i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv16i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv4i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv64i8(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv64i8(,,, i8*, , , i64) + +define @test_vlxseg3_nxv16i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv16i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = 
tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv64i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv4i16(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv4i16(,,, i8*, , , i64) + +define @test_vlxseg3_nxv16i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv16i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv4i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv8i64(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv8i64(,,, i8*, , , i64) + +define @test_vlxseg3_nxv16i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv16i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv8i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv1i8(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv1i8(,,, i8*, , , i64) + +define @test_vlxseg3_nxv16i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg3_mask_nxv16i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv1i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv2i8(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv2i8(,,, i8*, , , i64) + +define @test_vlxseg3_nxv16i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv16i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv2i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv8i32(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv8i32(,,, i8*, , , i64) + +define @test_vlxseg3_nxv16i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv16i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv8i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv32i8(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv32i8(,,, i8*, , , i64) + +define @test_vlxseg3_nxv16i8_nxv32i8(i8* %base, %index, i64 
%vl) { +; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv16i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv32i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv16i32(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i64) + +define @test_vlxseg3_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv16i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv2i16(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv2i16(,,, i8*, , , i64) + +define @test_vlxseg3_nxv16i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv16i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} 
@llvm.riscv.vlxseg3.nxv16i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv2i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv2i64(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv2i64(,,, i8*, , , i64) + +define @test_vlxseg3_nxv16i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv16i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv16i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv16i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv16i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv16i8.nxv2i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv16i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv16i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv32i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv32i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} 
%0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv32i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv4i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv4i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv4i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv16i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv16i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} 
@llvm.riscv.vlxseg4.nxv16i8.nxv1i64(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv1i64(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv1i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv1i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv1i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv1i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv8i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv8i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; 
CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv8i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv4i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv4i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv4i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv1i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv1i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv1i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv2i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv2i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv2i32: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv2i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv8i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv8i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv8i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv4i64(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv4i64(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv4i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv64i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv64i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv64i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv4i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv4i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv4i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv8i64(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv8i64(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} 
@llvm.riscv.vlxseg4.nxv16i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv8i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv1i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv1i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv1i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv2i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv2i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv2i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 
%vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv8i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv8i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv8i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv32i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv32i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv32i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv16i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv16i32: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv16i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv2i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv2i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv2i64(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv2i64(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv16i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv16i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv16i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv16i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv16i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv16i8.nxv2i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i64.nxv16i16(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv16i16(,, i64*, , , i64) + +define 
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv16i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv32i16(i64*, <vscale x 32 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv32i16(i64* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv32i16(i64* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv32i16(i64* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv32i16(i64* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv32i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv4i32(i64*, <vscale x 4 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv4i32(i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv4i32(i64* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv4i32(i64* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv4i32(i64* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv4i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv16i8(i64*, <vscale x 16 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv16i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv1i64(i64*, <vscale x 1 x i64>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv1i32(i64*, <vscale x 1 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv8i16(i64*, <vscale x 8 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv8i16(i64* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv8i16(i64* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv8i16(i64* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv8i16(i64* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv8i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv4i8(i64*, <vscale x 4 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv4i8(i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv4i8(i64* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv4i8(i64* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv4i8(i64* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv4i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv1i16(i64*, <vscale x 1 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv1i16(i64* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv1i16(i64* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv1i16(i64* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv1i16(i64* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv2i32(i64*, <vscale x 2 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv2i32(i64* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv2i32(i64* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv2i32(i64* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv2i32(i64* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv2i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv8i8(i64*, <vscale x 8 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv8i8(i64* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv8i8(i64* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv8i8(i64* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv8i8(i64* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv8i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv4i64(i64*, <vscale x 4 x i64>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv4i64(i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv4i64(i64* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv4i64(i64* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv4i64(i64* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv4i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv64i8(i64*, <vscale x 64 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv64i8(i64* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv64i8(i64* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv64i8(i64* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv64i8(i64* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv64i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv4i16(i64*, <vscale x 4 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv4i16(i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv4i16(i64* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv4i16(i64* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv4i16(i64* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv4i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv8i64(i64*, <vscale x 8 x i64>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv8i64(i64* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv8i64(i64* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv8i64(i64* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv8i64(i64* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv8i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv1i8(i64*, <vscale x 1 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv1i8(i64* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv1i8(i64* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv1i8(i64* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv1i8(i64* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv2i8(i64*, <vscale x 2 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv2i8(i64* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv2i8(i64* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv2i8(i64* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv2i8(i64* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv2i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv8i32(i64*, <vscale x 8 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv8i32(i64* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv8i32(i64* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv8i32(i64* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv8i32(i64* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv8i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv32i8(i64*, <vscale x 32 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv32i8(i64* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv32i8(i64* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv32i8(i64* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv32i8(i64* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv32i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv16i32(i64*, <vscale x 16 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv16i32(i64* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv16i32(i64* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv16i32(i64* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv16i32(i64* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv16i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv2i16(i64*, <vscale x 2 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv2i16(i64* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv2i16(i64* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv2i16(i64* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv2i16(i64* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv2i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv2i64(i64*, <vscale x 2 x i64>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg2_nxv1i64_nxv2i64(i64* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv2i64(i64* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg2_mask_nxv1i64_nxv2i64(i64* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.nxv1i64.nxv2i64(i64* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg2.mask.nxv1i64.nxv2i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv16i16(i64*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv16i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv32i16(i64*, <vscale x 32 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv32i16(i64* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv32i16(i64* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv32i16(i64* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv32i16(i64* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv32i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv4i32(i64*, <vscale x 4 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv4i32(i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv4i32(i64* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv4i32(i64* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv4i32(i64* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv4i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv16i8(i64*, <vscale x 16 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv16i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv1i64(i64*, <vscale x 1 x i64>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv1i32(i64*, <vscale x 1 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv8i16(i64*, <vscale x 8 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv8i16(i64* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv8i16(i64* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv8i16(i64* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv8i16(i64* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv8i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv4i8(i64*, <vscale x 4 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv4i8(i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv4i8(i64* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv4i8(i64* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv4i8(i64* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv4i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv1i16(i64*, <vscale x 1 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv1i16(i64* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv1i16(i64* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv1i16(i64* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv1i16(i64* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv2i32(i64*, <vscale x 2 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv2i32(i64* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv2i32(i64* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv2i32(i64* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv2i32(i64* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv2i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv8i8(i64*, <vscale x 8 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv8i8(i64* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv8i8(i64* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv8i8(i64* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv8i8(i64* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv8i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv4i64(i64*, <vscale x 4 x i64>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv4i64(i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv4i64(i64* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv4i64(i64* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv4i64(i64* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv4i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv64i8(i64*, <vscale x 64 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv64i8(i64* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv64i8(i64* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv64i8(i64* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv64i8(i64* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv64i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv4i16(i64*, <vscale x 4 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv4i16(i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv4i16(i64* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv4i16(i64* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv4i16(i64* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv4i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv8i64(i64*, <vscale x 8 x i64>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv8i64(i64* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv8i64(i64* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv8i64(i64* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv8i64(i64* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv8i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv1i8(i64*, <vscale x 1 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv1i8(i64* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv1i8(i64* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv1i8(i64* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv1i8(i64* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv2i8(i64*, <vscale x 2 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv2i8(i64* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv2i8(i64* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv2i8(i64* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv2i8(i64* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv2i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv8i32(i64*, <vscale x 8 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv8i32(i64* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv8i32(i64* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv8i32(i64* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv8i32(i64* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv8i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv32i8(i64*, <vscale x 32 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv32i8(i64* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv32i8(i64* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv32i8(i64* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv32i8(i64* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv32i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv16i32(i64*, <vscale x 16 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv16i32(i64* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv16i32(i64* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv16i32(i64* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv16i32(i64* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv16i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv2i16(i64*, <vscale x 2 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv2i16(i64* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv2i16(i64* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv2i16(i64* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv2i16(i64* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv2i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv2i64(i64*, <vscale x 2 x i64>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg3_nxv1i64_nxv2i64(i64* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv2i64(i64* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg3_mask_nxv1i64_nxv2i64(i64* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.nxv1i64.nxv2i64(i64* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg3.mask.nxv1i64.nxv2i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv16i16(i64*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv16i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv32i16(i64*, <vscale x 32 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv32i16(i64* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv32i16(i64* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv32i16(i64* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv32i16(i64* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv32i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv4i32(i64*, <vscale x 4 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv4i32(i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv4i32(i64* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv4i32(i64* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv4i32(i64* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv4i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv16i8(i64*, <vscale x 16 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv16i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv1i64(i64*, <vscale x 1 x i64>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv1i32(i64*, <vscale x 1 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv8i16(i64*, <vscale x 8 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv8i16(i64* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv8i16(i64* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv8i16(i64* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv8i16(i64* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv8i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv4i8(i64*, <vscale x 4 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv4i8(i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv4i8(i64* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv4i8(i64* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv4i8(i64* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv4i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv1i16(i64*, <vscale x 1 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv1i16(i64* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv1i16(i64* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv1i16(i64* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv1i16(i64* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv2i32(i64*, <vscale x 2 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv2i32(i64* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv2i32(i64* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv2i32(i64* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv2i32(i64* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv2i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv8i8(i64*, <vscale x 8 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv8i8(i64* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv8i8(i64* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv8i8(i64* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv8i8(i64* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv8i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv4i64(i64*, <vscale x 4 x i64>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv4i64(i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv4i64(i64* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv4i64(i64* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv4i64(i64* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv4i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv64i8(i64*, <vscale x 64 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv64i8(i64* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv64i8(i64* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv64i8(i64* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv64i8(i64* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv64i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv4i16(i64*, <vscale x 4 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv4i16(i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv4i16(i64* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv4i16(i64* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv4i16(i64* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv4i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv8i64(i64*, <vscale x 8 x i64>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv8i64(i64* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv8i64(i64* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv8i64(i64* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv8i64(i64* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv8i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv1i8(i64*, <vscale x 1 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv1i8(i64* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv1i8(i64* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv1i8(i64* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv1i8(i64* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv2i8(i64*, <vscale x 2 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv2i8(i64* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv2i8(i64* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv2i8(i64* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv2i8(i64* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv2i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv8i32(i64*, <vscale x 8 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv8i32(i64* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv8i32(i64* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv8i32(i64* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv8i32(i64* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv8i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv32i8(i64*, <vscale x 32 x i8>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv32i8(i64* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv32i8(i64* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv32i8(i64* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv32i8(i64* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv32i8(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv16i32(i64*, <vscale x 16 x i32>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv16i32(i64* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv16i32(i64* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv16i32(i64* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv16i32(i64* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv16i32(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv2i16(i64*, <vscale x 2 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv2i16(i64* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv2i16(i64* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv2i16(i64* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv2i16(i64* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv2i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv2i64(i64*, <vscale x 2 x i64>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg4_nxv1i64_nxv2i64(i64* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv2i64(i64* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg4_mask_nxv1i64_nxv2i64(i64* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.nxv1i64.nxv2i64(i64* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg4.mask.nxv1i64.nxv2i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg5.nxv1i64.nxv16i16(i64*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlxseg5_nxv1i64_nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg5.nxv1i64.nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlxseg5_mask_nxv1i64_nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg5.nxv1i64.nxv16i16(i64* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv16i16(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} 
@llvm.riscv.vlxseg5.nxv1i64.nxv32i16(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv32i16(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv32i16( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv4i32(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv4i32(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv4i32( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv16i8(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv16i8(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg5_mask_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv16i8( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv1i64(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv1i64(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv1i64( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv1i32(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv1i32(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv1i32( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + 
+declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv8i16(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv8i16(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv8i16( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv4i8(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv4i8(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv4i8( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv1i16(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv1i16(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg5_mask_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv1i16( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv2i32(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv2i32(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv2i32( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv8i8(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv8i8(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv8i8( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare 
{,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv4i64(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv4i64(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv4i64( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv64i8(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv64i8(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv64i8( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv4i16(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv4i16(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg5_mask_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv4i16( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv8i64(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv8i64(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv8i64( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv1i8(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv1i8(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv1i8( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare 
{,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv2i8(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv2i8(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv2i8( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv8i32(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv8i32(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv8i32( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv32i8(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv32i8(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg5_mask_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv32i8( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv16i32(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv16i32(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv16i32( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv2i16(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv2i16(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv2i16( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 
+} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv2i64(i64*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv2i64(,,,,, i64*, , , i64) + +define @test_vlxseg5_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i64.nxv2i64( %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv16i16(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv16i16(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv16i16( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv32i16(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv32i16(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg6_mask_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv32i16( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv4i32(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv4i32(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv4i32( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv16i8(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv16i8(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv16i8(i64* %base, 
%index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv16i8( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv1i64(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv1i64(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv1i64( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv1i32(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv1i32(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv1i32( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv8i16(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv8i16(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: 
# kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv8i16( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv4i8(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv4i8(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv4i8( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv1i16(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv1i16(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv1i16( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv2i32(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv2i32(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv2i32( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv8i8(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv8i8(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv8i8( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv4i64(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv4i64(,,,,,, i64*, , , i64) + +define 
@test_vlxseg6_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv4i64( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv64i8(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv64i8(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv64i8( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv4i16(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv4i16(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv4i16( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv8i64(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv8i64(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv8i64( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv1i8(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv1i8(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv1i8( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, 
i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv2i8(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv2i8(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv2i8( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv8i32(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv8i32(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv8i32( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv32i8(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv32i8(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 
= extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv32i8( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv16i32(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv16i32(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv16i32( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv2i16(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv2i16(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call 
{,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv2i16( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv2i64(i64*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv2i64(,,,,,, i64*, , , i64) + +define @test_vlxseg6_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i64.nxv2i64( %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv16i16(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv16i16(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv32i16(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv32i16(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv32i16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv4i32(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv4i32(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv16i8(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv16i8(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv1i64(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv1i64(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv1i32(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv1i32(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %1 = extractvalue 
{,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv8i16(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv8i16(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv4i8(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv4i8(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv1i16(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv1i16(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; 
CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv2i32(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv2i32(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv8i8(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv8i8(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 
+; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv4i64(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv4i64(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv64i8(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv64i8(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv64i8( %1, %1, 
%1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv4i16(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv4i16(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv8i64(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv8i64(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv1i8(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv1i8(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 
killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv2i8(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv2i8(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv8i32(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv8i32(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; 
CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv32i8(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv32i8(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv16i32(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv16i32(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue 
{,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv2i16(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv2i16(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv2i64(i64*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv2i64(,,,,,,, i64*, , , i64) + +define @test_vlxseg7_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i64.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv16i16(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv16i16(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret 
+entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv16i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv32i16(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv32i16(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv4i32(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv4i32(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: 
vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv16i8(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv16i8(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv16i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv1i64(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv1i64(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv1i64(i64* %base, %index, i64 
%vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv1i32(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv1i32(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv8i16(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv8i16(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv4i8(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv4i8(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv4i8(i64* %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv1i16(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv1i16(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv2i32(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv2i32(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg8_mask_nxv1i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv8i8(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv8i8(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv4i64(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv4i64(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v 
v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv64i8(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv64i8(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv4i16(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv4i16(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = 
extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv8i64(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv8i64(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv1i8(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv1i8(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv2i8(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv2i8(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # 
kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv8i32(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv8i32(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv32i8(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv32i8(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu 
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv16i32(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv16i32(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv2i16(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv2i16(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call 
{,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv2i64(i64*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv2i64(,,,,,,,, i64*, , , i64) + +define @test_vlxseg8_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i64.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv16i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv16i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv16i16( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv32i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv32i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 
killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg2_mask_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv4i32(i32*, <vscale x 4 x i32>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg2_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg2_mask_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv16i8(i32*, <vscale x 16 x i8>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg2_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg2_mask_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv1i64(i32*, <vscale x 1 x i64>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>}
@llvm.riscv.vlxseg2.mask.nxv1i32.nxv1i64(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv1i64( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv1i32(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv1i32( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv8i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv8i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} 
@llvm.riscv.vlxseg2.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv8i16( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv4i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv4i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv4i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv1i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv1i16( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv2i32(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv2i32(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv2i32( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv8i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv8i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv8i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv4i64(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv4i64(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv4i64( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv64i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv64i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail 
call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv64i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv4i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv4i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv4i16( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv8i64(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv8i64(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv8i64( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv1i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv1i8(i32* 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv1i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv2i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv2i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv2i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i32.nxv8i32(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv8i32(,, i32*, , , i64) + +define @test_vlxseg2_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} 
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv32i8(i32*, <vscale x 32 x i8>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg2_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg2_mask_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv16i32(i32*, <vscale x 16 x i32>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg2_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg2_mask_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv2i16(i32*, <vscale x 2 x i16>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg2_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg2_mask_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv2i64(i32*, <vscale x 2 x i64>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg2_nxv1i32_nxv2i64(i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv2i64(i32* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg2_mask_nxv1i32_nxv2i64(i32* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.nxv1i32.nxv2i64(i32* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg2.mask.nxv1i32.nxv2i64(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.nxv1i32.nxv16i16(i32*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg3_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg3_mask_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.nxv1i32.nxv32i16(i32*, <vscale x 32 x i16>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg3_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg3_mask_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.nxv1i32.nxv4i32(i32*, <vscale x 4 x i32>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg3_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg3_mask_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.nxv1i32.nxv16i8(i32*, <vscale x 16 x i8>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg3_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg3_mask_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>}
@llvm.riscv.vlxseg3.nxv1i32.nxv1i64(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv1i64(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv1i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv1i32(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv1i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv8i16(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv8i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv8i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv4i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv4i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv4i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv1i16(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv1i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv2i32(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv2i32(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} 
@llvm.riscv.vlxseg3.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv2i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv8i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv8i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv8i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv4i64(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv4i64(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv4i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv64i8(i32*, , i64) 
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv64i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv64i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv4i16(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv4i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv4i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv8i64(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv8i64(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: 
vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv8i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv1i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv1i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv2i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv2i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv2i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv8i32(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv8i32(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 
= extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv8i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv32i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv32i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv32i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv16i32(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv16i32(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv16i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv2i16(i32*, , i64) +declare {,,} 
@llvm.riscv.vlxseg3.mask.nxv1i32.nxv2i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv2i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv2i64(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv2i64(,,, i32*, , , i64) + +define @test_vlxseg3_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i32.nxv2i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv16i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv16i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli 
a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv16i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv32i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv32i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv32i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv4i32(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv4i32(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv4i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv16i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv16i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; 
CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv16i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv1i64(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv1i64(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv1i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) 
+ %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv8i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv8i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv4i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv4i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv1i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg4_mask_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv1i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv2i32(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv2i32(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv2i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv8i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv8i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} 
@llvm.riscv.vlxseg4.nxv1i32.nxv4i64(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv4i64(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv4i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv64i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv64i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv4i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv4i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; 
CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv8i64(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv8i64(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv8i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv2i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv2i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vlxseg4_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv8i32(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv8i32(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv32i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv32i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; 
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.nxv1i32.nxv16i32(i32*, <vscale x 16 x i32>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg4_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg4_mask_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.nxv1i32.nxv2i16(i32*, <vscale x 2 x i16>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg4_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg4_mask_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.nxv1i32.nxv2i64(i32*, <vscale x 2 x i64>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg4_nxv1i32_nxv2i64(i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.nxv1i32.nxv2i64(i32* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg4_mask_nxv1i32_nxv2i64(i32* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.nxv1i32.nxv2i64(i32* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg4.mask.nxv1i32.nxv2i64(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.nxv1i32.nxv16i16(i32*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg5_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg5_mask_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.nxv1i32.nxv32i16(i32*, <vscale x 32 x i16>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg5_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg5_mask_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.nxv1i32.nxv4i32(i32*, <vscale x 4 x i32>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg5_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg5_mask_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.nxv1i32.nxv16i8(i32*, <vscale x 16 x i8>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg5_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlxseg5_mask_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, <vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.nxv1i32.nxv1i64(i32*, <vscale x 1 x i64>, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlxseg5_nxv1i32_nxv1i64(i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed
$v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv1i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv8i16(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv8i16(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 
= tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv4i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv4i8(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv1i16(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv1i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv2i32(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv2i32(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv2i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv8i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv8i8(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv4i64(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv4i64(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call 
{,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv4i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv64i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv64i8(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv4i16(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv4i16(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv8i64(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv8i64(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv8i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv2i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv2i8(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} 
@llvm.riscv.vlxseg5.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv8i32(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv8i32(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv32i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv32i8(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv32i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv16i32(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv16i32(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv16i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv2i16(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv2i16(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv2i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv2i64(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv2i64(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: 
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i32.nxv2i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv16i16(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv16i16(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv16i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv32i16(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv32i16(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv32i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv4i32(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv4i32(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv4i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv16i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv16i8(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv16i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv1i64(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv1i64(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v 
v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv1i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv8i16(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv8i16(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv4i8(i32*, , i64) 
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv4i8(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv1i16(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv1i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv2i32(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv2i32(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv2i32(i32* %base, %index, i64 
%vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv8i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv8i8(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv4i64(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv4i64(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call 
{,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv4i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv64i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv64i8(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv4i16(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv4i16(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv8i64(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv8i64(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv8i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv2i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv2i8(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v 
v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv8i32(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv8i32(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv32i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv32i8(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv32i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv16i32(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv16i32(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vlxseg6_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv16i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv2i16(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv2i16(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv2i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv2i64(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv2i64(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: 
vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i32.nxv2i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv16i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv16i16(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv32i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv32i16(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} 
@llvm.riscv.vlxseg7.mask.nxv1i32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv4i32(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv4i32(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv16i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv16i8(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv1i64(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv1i64(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v 
v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv8i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv8i16(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; 
CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv4i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv4i8(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv1i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv1i16( %1, %1, 
%1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv2i32(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv2i32(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv8i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv8i8(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv4i64(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv4i64(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 
killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv64i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv64i8(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv4i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv4i16(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv8i64(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv8i64(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 
= extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv2i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv2i8(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv8i32(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv8i32(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv32i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv32i8(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: 
+ %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv16i32(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv16i32(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv2i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv2i16(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; 
CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv2i64(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv2i64(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i32.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv16i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv16i16(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 
%vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv32i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv32i16(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv4i32(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv4i32(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv16i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv16i8(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: 
vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv1i64(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv1i64(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv1i32(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv1i32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv8i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv8i16(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv4i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv4i8(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, 
v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv1i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv2i32(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv2i32(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv8i8(i32*, , i64) +declare 
{,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv8i8(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv4i64(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv4i64(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv64i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv64i8(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call 
{,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv4i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv4i16(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv8i64(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv8i64(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v 
v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv1i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv2i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv2i8(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail 
call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv8i32(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv8i32(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv32i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv32i8(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv16i32(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv16i32(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vlxseg8_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv2i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv2i16(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv2i64(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv2i64(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg8_mask_nxv1i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i32.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv16i16(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv16i16(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv16i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv32i16(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv32i16(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv32i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv32i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} 
@llvm.riscv.vlxseg2.nxv8i16.nxv4i32(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv4i32(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv4i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv4i32( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv16i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv16i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv16i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv16i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv1i64(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv1i64(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv1i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv1i64( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv1i32(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv1i32(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv1i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv1i32( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv8i16(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv8i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv4i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv4i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv4i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg2_mask_nxv8i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv4i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv1i16(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv1i16(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv1i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv1i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv2i32(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv2i32(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv2i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv2i32( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv8i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: 
def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv8i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv4i64(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv4i64(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv4i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv4i64( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv64i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv64i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv64i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv64i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv4i16(i16*, , i64) +declare {,} 
@llvm.riscv.vlxseg2.mask.nxv8i16.nxv4i16(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv4i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv4i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv8i64(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv8i64(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv8i64( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv1i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv1i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv1i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} 
@llvm.riscv.vlxseg2.nxv8i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv1i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv2i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv2i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv2i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv2i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv8i32(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv8i32(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv8i32( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv32i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv32i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv32i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv32i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv32i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv16i32(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv16i32(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv16i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv16i32( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv2i16(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv2i16(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv2i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv2i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i16.nxv2i64(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv2i64(,, i16*, , , i64) + +define @test_vlxseg2_nxv8i16_nxv2i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed 
$v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg2.nxv8i16.nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlxseg2_mask_nxv8i16_nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg2.nxv8i16.nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg2.mask.nxv8i16.nxv2i64(<vscale x 8 x i16> %1, <vscale x 8 x i16> %1, i16* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+  ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv16i16(i16*, <vscale x 16 x i16>, i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i16> @test_vlxseg3_nxv8i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv8i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlxseg3_mask_nxv8i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv8i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv16i16(<vscale x 8 x i16> %1, <vscale x 8 x i16> %1, <vscale x 8 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+  ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv32i16(i16*, <vscale x 32 x i16>, i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv32i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i16> @test_vlxseg3_nxv8i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv8i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlxseg3_mask_nxv8i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv8i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv32i16(<vscale x 8 x i16> %1, <vscale x 8 x i16> %1, <vscale x 8 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+  ret <vscale x 8 x i16> %3
+}
+
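+; The vlxseg3 tests below follow the same shape as the vlxseg2 and
+; vlxseg8 tests above: the unmasked test checks that a single segment
+; field can be extracted from the returned tuple, and the masked test
+; feeds field 0 of an unmasked load back in as every merge operand
+; before reloading under the v0.t mask. A minimal fully-typed sketch of
+; that pattern, with illustrative value names (%seg, %f0, %mseg, %f1 do
+; not appear in the generated tests) and the same operand order as the
+; declares above:
+;
+;   %seg  = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>}
+;             @llvm.riscv.vlxseg3.nxv8i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl)
+;   %f0   = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %seg, 0
+;   %mseg = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>}
+;             @llvm.riscv.vlxseg3.mask.nxv8i16.nxv4i32(<vscale x 8 x i16> %f0, <vscale x 8 x i16> %f0,
+;               <vscale x 8 x i16> %f0, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
+;   %f1   = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %mseg, 1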
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv4i32(i16*, <vscale x 4 x i32>, i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i16> @test_vlxseg3_nxv8i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv8i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlxseg3_mask_nxv8i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv8i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv4i32(<vscale x 8 x i16> %1, <vscale x 8 x i16> %1, <vscale x 8 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+  ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv16i8(i16*, <vscale x 16 x i8>, i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i16> @test_vlxseg3_nxv8i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv8i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlxseg3_mask_nxv8i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv8i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv16i8(<vscale x 8 x i16> %1, <vscale x 8 x i16> %1, <vscale x 8 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+  ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv1i64(i16*, <vscale x 1 x i64>, i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv1i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i16> @test_vlxseg3_nxv8i16_nxv1i64(i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv8i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv1i64(i16* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlxseg3_mask_nxv8i16_nxv1i64(i16* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv8i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+;
CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv1i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8i16.nxv1i32(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv1i32(,,, i16*, , , i64) + +define @test_vlxseg3_nxv8i16_nxv1i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv1i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8i16.nxv8i16(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i64) + +define @test_vlxseg3_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv8i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8i16.nxv4i8(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv4i8(,,, i16*, , , i64) + +define @test_vlxseg3_nxv8i16_nxv4i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed 
$v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlxseg3_mask_nxv8i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv8i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv4i8(<vscale x 8 x i16> %1, <vscale x 8 x i16> %1, <vscale x 8 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+  ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv1i16(i16*, <vscale x 1 x i16>, i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv1i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i16> @test_vlxseg3_nxv8i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv8i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlxseg3_mask_nxv8i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv8i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.nxv8i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg3.mask.nxv8i16.nxv1i16(<vscale x 8 x i16> %1, <vscale x 8 x i16> %1, <vscale x 8 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+  ret <vscale x 8 x i16> %3
+}
+
+; The generated vlxseg3 tests continue in the identical declare/define pattern
+; for index types nxv2i32, nxv8i8, nxv4i64, nxv64i8, nxv4i16, nxv8i64,
+; nxv1i8, nxv2i8, nxv8i32, nxv32i8, nxv16i32, nxv2i16 and nxv2i64; only the
+; index operand type and the eiNN suffix of the expected mnemonic differ.
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg4.nxv8i16.nxv16i16(i16*, <vscale x 16 x i16>, i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg4.mask.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i16> @test_vlxseg4_nxv8i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv8i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg4.nxv8i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlxseg4_mask_nxv8i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv8i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg4.nxv8i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg4.mask.nxv8i16.nxv16i16(<vscale x 8 x i16> %1, <vscale x 8 x i16> %1, <vscale x 8 x i16> %1, <vscale x 8 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+  ret <vscale x 8 x i16> %3
+}
+
+; The generated vlxseg4 tests continue in the identical declare/define pattern
+; for index types nxv32i16, nxv4i32, nxv16i8, nxv1i64, nxv1i32, nxv8i16,
+; nxv4i8, nxv1i16, nxv2i32, nxv8i8, nxv4i64, nxv64i8, nxv4i16, nxv8i64,
+; nxv1i8, nxv2i8, nxv8i32, nxv32i8, nxv16i32, nxv2i16 and nxv2i64; only the
+; index operand type and the eiNN suffix of the expected mnemonic differ.
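+; Illustrative sketch only, not part of the generated test pattern above:
+; a hypothetical caller (the name @sum_first_two_segments is assumed for
+; illustration) showing how the NF result segments of an indexed segment
+; load are consumed from IR. It reuses the vlxseg4 intrinsic declared
+; earlier in this file.
+define <vscale x 8 x i16> @sum_first_two_segments(i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
+entry:
+  ; Load four nxv8i16 segments through the 16-bit indexed form.
+  %ld = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlxseg4.nxv8i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  ; Each segment is recovered with extractvalue, as in the tests above.
+  %s0 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %ld, 0
+  %s1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %ld, 1
+  %sum = add <vscale x 8 x i16> %s0, %s1
+  ret <vscale x 8 x i16> %sum
+}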
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg2.nxv4i8.nxv16i16(i8*, <vscale x 16 x i16>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg2.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg2_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg2.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg2_mask_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg2.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg2.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+; The generated vlxseg2 tests continue in the identical pattern for index
+; types nxv32i16, nxv4i32, nxv16i8, nxv1i64, nxv1i32, nxv8i16, nxv4i8,
+; nxv1i16, nxv2i32, nxv8i8, nxv4i64, nxv64i8, nxv4i16, nxv8i64, nxv1i8,
+; nxv2i8, nxv8i32 and nxv32i8, before resuming below with nxv16i32.
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg2.nxv4i8.nxv16i32(i8*, <vscale x 16 x i32>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg2.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg2_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg2.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+} + +define @test_vlxseg2_mask_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i8.nxv16i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i8.nxv2i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i8.nxv2i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i8.nxv2i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i8.nxv2i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i8.nxv2i64( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4i8.nxv16i16(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv16i16(,,, i8*, , , i64) + +define @test_vlxseg3_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; 
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv32i16(i8*, <vscale x 32 x i16>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv4i32(i8*, <vscale x 4 x i32>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv16i8(i8*, <vscale x 16 x i8>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv1i64(i8*, <vscale x 1 x i64>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv1i64(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv1i32(i8*, <vscale x 1 x i32>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv8i16(i8*, <vscale x 8 x i16>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv4i8(i8*, <vscale x 4 x i8>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv1i16(i8*, <vscale x 1 x i16>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv2i32(i8*, <vscale x 2 x i32>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv8i8(i8*, <vscale x 8 x i8>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv4i64(i8*, <vscale x 4 x i64>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv64i8(i8*, <vscale x 64 x i8>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv4i16(i8*, <vscale x 4 x i16>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv8i64(i8*, <vscale x 8 x i64>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv8i64(i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv8i64(i8* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv8i64(i8* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv8i64(i8* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv8i64(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv1i8(i8*, <vscale x 1 x i8>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv2i8(i8*, <vscale x 2 x i8>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv8i32(i8*, <vscale x 8 x i32>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv32i8(i8*, <vscale x 32 x i8>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv16i32(i8*, <vscale x 16 x i32>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv2i16(i8*, <vscale x 2 x i16>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv2i64(i8*, <vscale x 2 x i64>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.mask.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg3_nxv4i8_nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg3_mask_nxv4i8_nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg3.nxv4i8.nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>}
@llvm.riscv.vlxseg3.mask.nxv4i8.nxv2i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv16i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv16i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv32i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv32i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv32i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg4_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv16i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv16i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv16i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv1i64(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv1i64(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv1i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv1i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv1i32(,,,, i8*, , , i64) + +define 
@test_vlxseg4_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv1i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv8i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv8i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv8i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: 
vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv1i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv1i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv2i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv2i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv2i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv8i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv8i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call 
{,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv8i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv4i64(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv4i64(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv4i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv64i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv64i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv64i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = 
extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv4i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv4i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv8i64(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv8i64(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv8i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv1i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv1i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: 
vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv1i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv2i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv2i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv2i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv8i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv8i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv8i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv32i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv32i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv32i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv16i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv16i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv16i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv2i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv2i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv2i16(i8* 
%base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv2i64(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv2i64(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i8.nxv2i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv16i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv16i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv32i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv32i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = 
extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv32i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv16i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv16i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv16i8( %1, 
%1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv1i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv1i64(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv1i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv1i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv1i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv1i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv8i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv8i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv8i16(i8* 
%base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv8i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv1i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv1i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + 
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg5.nxv4i8.nxv2i32(i8*, <vscale x 2 x i32>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg5_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg5.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg5_mask_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg5.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg5.nxv4i8.nxv8i8(i8*, <vscale x 8 x i8>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg5_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg5.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg5_mask_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg5.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg5.nxv4i8.nxv4i64(i8*, <vscale x 4 x i64>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg5_nxv4i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg5.nxv4i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg5_mask_nxv4i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv4i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv64i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv64i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv64i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv4i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv4i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv8i64(i8*, , i64) +declare {,,,,} 
@llvm.riscv.vlxseg5.mask.nxv4i8.nxv8i64(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv8i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv1i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv1i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv2i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv2i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 
+; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv8i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv8i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv8i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv32i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv32i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv32i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv16i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv16i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv16i32(i8* %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv16i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv2i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv2i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv2i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv2i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv2i64(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v 
v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i8.nxv2i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv16i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv16i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv32i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv32i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv32i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i64) + +define 
@test_vlxseg6_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv16i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv16i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv16i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv1i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv1i64(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, 
e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv1i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv1i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv1i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv1i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv8i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv8i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv8i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + 
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv1i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv1i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv2i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv2i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i8_nxv2i32(i8* %base, 
<vscale x 2 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv8i8(i8*, <vscale x 8 x i8>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg6_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg6_mask_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv4i64(i8*, <vscale x 4 x i64>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg6_nxv4i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg6_mask_nxv4i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv64i8(i8*, <vscale x 64 x i8>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg6_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg6_mask_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv4i16(i8*, <vscale x 4 x i16>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg6_nxv4i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg6_mask_nxv4i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv8i64(i8*, <vscale x 8 x i64>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg6_nxv4i8_nxv8i64(i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv8i64(i8* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg6_mask_nxv4i8_nxv8i64(i8* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv8i64(i8* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv8i64(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv1i8(i8*, <vscale x 1 x i8>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg6_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg6_mask_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv2i8(i8*, <vscale x 2 x i8>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg6_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg6_mask_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv8i32(i8*, <vscale x 8 x i32>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg6_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg6_mask_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv32i8(i8*, <vscale x 32 x i8>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg6_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg6_mask_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv16i32(i8*, <vscale x 16 x i32>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg6_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg6_mask_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv2i16(i8*, <vscale x 2 x i16>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg6_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg6_mask_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv2i64(i8*, <vscale x 2 x i64>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg6_nxv4i8_nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv4i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg6.nxv4i8.nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg6_mask_nxv4i8_nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv4i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:
vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i8.nxv2i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv16i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv16i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv32i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv32i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,,,,} 
@llvm.riscv.vlxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv16i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv16i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv1i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv1i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg7_mask_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv1i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv1i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv8i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv8i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret 
+entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv1i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv1i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv2i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv2i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv2i32: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv8i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv8i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv4i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv4i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; 
CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv64i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv64i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv4i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv4i16( 
%1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv8i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv8i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv1i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv1i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv2i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv2i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv8i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv8i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv32i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv32i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; 
CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv16i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv16i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv2i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv2i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv2i64(i8*, , i64) 
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv2i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i8.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv16i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv16i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv32i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv32i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv32i16(i8* %base, 
%index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv4i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv16i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv16i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 
+; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv1i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv1i64(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv1i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv1i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, 
i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv8i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv8i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv4i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv1i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv1i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed 
$v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv2i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv2i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv8i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv8i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: 
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.nxv4i8.nxv4i64(i8*, <vscale x 4 x i64>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg8_nxv4i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.nxv4i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg8_mask_nxv4i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.nxv4i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.nxv4i8.nxv64i8(i8*, <vscale x 64 x i8>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg8_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg8_mask_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0,
0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv4i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv8i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv8i64(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv1i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv1i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv1i8: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv2i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv2i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv8i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv8i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv8i32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv32i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv32i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv16i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv16i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: 
ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.nxv4i8.nxv2i16(i8*, <vscale x 2 x i16>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg8_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg8_mask_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.nxv4i8.nxv2i64(i8*, <vscale x 2 x i64>, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlxseg8_nxv4i8_nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.nxv4i8.nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlxseg8_mask_nxv4i8_nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.nxv4i8.nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlxseg8.mask.nxv4i8.nxv2i64(<vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, <vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg2.nxv1i16.nxv16i16(i16*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
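+; The tests below cover the vlxseg2 intrinsics with a <vscale x 1 x i16>
+; result, pairing that result type with the various index vector types.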
+define <vscale x 1 x i16> @test_vlxseg2_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg2.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlxseg2_mask_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg2.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %1, <vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg2.nxv1i16.nxv32i16(i16*, <vscale x 32 x i16>, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlxseg2_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg2.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlxseg2_mask_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg2.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %1, <vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg2.nxv1i16.nxv4i32(i16*, <vscale x 4 x i32>, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlxseg2_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg2.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlxseg2_mask_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg2.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 =
extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv4i32( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv16i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv16i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv16i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv1i64(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv1i64(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv1i64( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv1i32(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v 
v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv1i32( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv8i16(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv8i16(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv8i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv4i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv4i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv4i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv1i16(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv1i16(i16* %base, %index, i64 
%vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv1i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv2i32(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv2i32(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv2i32( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv8i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv8i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv8i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv4i64(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv4i64(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv4i64: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv4i64( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv64i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv64i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv64i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv4i16(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv4i16(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv4i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = 
extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv8i64(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv8i64(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv8i64( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv1i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv1i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv2i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv2i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: 
vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv2i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv8i32(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv8i32(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv8i32( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv32i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv32i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv32i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv16i32(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv16i32(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv16i32( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv2i16(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv2i16(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv2i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i16.nxv2i64(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv2i64(,, i16*, , , i64) + +define @test_vlxseg2_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i16.nxv2i64( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv16i16(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i16.nxv16i16(,,, i16*, , , i64) + +define @test_vlxseg3_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i16.nxv16i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv32i16(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i16.nxv32i16(,,, i16*, , , i64) + +define @test_vlxseg3_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i16.nxv32i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv4i32(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i16.nxv4i32(,,, i16*, , , i64) + +define @test_vlxseg3_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i16.nxv4i32( %1, %1, %1, i16* 
%base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv16i8(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i16.nxv16i8(,,, i16*, , , i64) + +define @test_vlxseg3_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i16.nxv16i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv1i64(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i16.nxv1i64(,,, i16*, , , i64) + +define @test_vlxseg3_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i16.nxv1i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv1i32(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i64) + +define @test_vlxseg3_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: 
vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg3.nxv1i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg3.nxv1i16.nxv8i16(i16*, <vscale x 8 x i16>, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg3.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlxseg3_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg3.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlxseg3_mask_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg3.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg3.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+; The same generated declare/define pair repeats for each remaining vlxseg3
+; index type, changing only the index vector type and the eiN suffix of the
+; expected instruction (ei8/ei16/ei32/ei64 for i8/i16/i32/i64 index elements):
+; nxv4i8, nxv1i16, nxv2i32, nxv8i8, nxv4i64, nxv64i8, nxv4i16, nxv8i64,
+; nxv1i8, nxv2i8, nxv8i32, nxv32i8, nxv16i32, nxv2i16, nxv2i64.
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg4.nxv1i16.nxv16i16(i16*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg4.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlxseg4_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg4.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlxseg4_mask_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg4.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg4.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+; The vlxseg4 pair above (four-element result tuple, one extra vmv1r.v copy)
+; repeats for the remaining index types: nxv32i16, nxv4i32, nxv16i8, nxv1i64,
+; nxv1i32, nxv8i16, nxv4i8, nxv1i16, nxv2i32, nxv8i8, nxv4i64, nxv64i8,
+; nxv4i16, nxv8i64, nxv1i8, nxv2i8, nxv8i32, nxv32i8, nxv16i32, nxv2i16,
+; nxv2i64.
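+
+; Note on the *_mask checks: the expected sequence first issues the unmasked
+; vlxseg with a tail-agnostic (ta) vsetvli to materialize a merge tuple,
+; copies the segments with vmv1r.v, then re-issues the load tail-undisturbed
+; (tu) under v0.t. This appears to mirror how selectVLXSEGMask wires the
+; maskedoff tuple built by createTuple into the masked pseudo.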
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg5.nxv1i16.nxv16i16(i16*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlxseg5_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg5.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlxseg5_mask_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg5.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, <vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+; The vlxseg5 pair above (five-element result tuple, vmv1r.v copies through
+; v5) repeats for the intermediate index types: nxv32i16, nxv4i32, nxv16i8,
+; nxv1i64, nxv1i32, nxv8i16, nxv4i8, nxv1i16, nxv2i32, nxv8i8, nxv4i64,
+; nxv64i8, nxv4i16, nxv8i64.
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg5.nxv1i16.nxv1i8(i16*, <vscale x 1 x i8>, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlxseg5_nxv1i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg5.nxv1i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlxseg5_mask_nxv1i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlxseg5.nxv1i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64
%vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv1i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv2i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv2i8(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv2i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv8i32(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv8i32(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv8i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv32i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv32i8(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} 
@llvm.riscv.vlxseg5.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv32i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv16i32(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv16i32(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv16i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv2i16(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv2i16(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv2i16(i16* 
%base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv2i64(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv2i64(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i16.nxv2i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv16i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv16i16(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv16i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv32i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv32i16(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed 
$v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv32i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv4i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv4i32(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv4i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv16i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv16i8(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv16i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv1i64(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv1i64(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv1i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv1i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv1i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv8i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv8i16(,,,,,, i16*, , , i64) + +define 
@test_vlxseg6_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv8i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv4i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv4i8(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv4i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv1i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv2i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv2i32(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv2i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv8i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv8i8(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv8i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, 
%mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv4i64(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv4i64(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv4i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv64i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv64i8(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv64i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv4i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv4i16(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv4i16(i16* 
%base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv8i64(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv8i64(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv8i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv1i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + 
%0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv1i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv2i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv2i8(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv2i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv8i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv8i32(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv8i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv32i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv32i8(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv32i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv16i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv16i32(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv16i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv2i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv2i16(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 
+; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv2i64(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv2i64(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i16.nxv2i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv16i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv16i16(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} 
@llvm.riscv.vlxseg7.nxv1i16.nxv32i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv32i16(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv4i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv4i32(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv16i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv16i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} 
@llvm.riscv.vlxseg7.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv1i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv1i64(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv1i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; 
CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv8i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv8i16(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv4i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv4i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} 
@llvm.riscv.vlxseg7.nxv1i16.nxv1i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv2i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv2i32(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv8i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv8i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} 
@llvm.riscv.vlxseg7.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv4i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv4i64(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv64i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv64i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: 
vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv4i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv4i16(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv8i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv8i64(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} 
@llvm.riscv.vlxseg7.nxv1i16.nxv1i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv2i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv2i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv8i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv8i32(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv8i32(i16* 
%base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv32i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv32i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv16i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv16i32(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: 
vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv2i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv2i16(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv2i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv2i64(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv16i16(i16*, , i64) 
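+; The vlxseg8 tests that follow mirror the vlxseg7 group above, with an
+; 8-field result tuple: each unmasked test returns field 1 of the tuple,
+; and each masked test first performs an unmasked load so all eight
+; maskedoff operands are defined before re-invoking the masked intrinsic.
+; As a reading aid, an inferred sketch of how the surrounding declaration
+; pair expands (assuming LLVM's usual scalable-vector mangling, where
+; nxv1i16 denotes <vscale x 1 x i16> and the mask type is <vscale x 1 x i1>):
+;
+;   declare {<vscale x 1 x i16>, ..., <vscale x 1 x i16>}    ; 8 fields
+;     @llvm.riscv.vlxseg8.nxv1i16.nxv16i16(i16*, <vscale x 16 x i16>, i64)
+;   declare {<vscale x 1 x i16>, ..., <vscale x 1 x i16>}    ; 8 fields
+;     @llvm.riscv.vlxseg8.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>, ...,
+;       i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)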
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv16i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv32i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv32i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv4i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv4i32(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv16i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv16i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv1i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv1i64(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: 
vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv1i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv8i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv8i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv8i16(i16* %base, 
%index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv4i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv4i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv1i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv2i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv2i32(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv2i32(i16* 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv8i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv8i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv4i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv4i64(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 
+} + +define @test_vlxseg8_mask_nxv1i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv64i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv64i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv4i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv4i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, 
v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv8i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv8i64(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv1i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, 
%mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv2i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv2i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv8i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv8i32(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv32i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv32i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v 
v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv16i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv16i32(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv2i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv2i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv2i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv2i64(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv16i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv16i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv16i16( %1, %1, i32* 
%base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv32i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv32i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv32i16( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv4i32(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv4i32(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv4i32( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv16i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv16i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; 
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv1i64(i32*, <vscale x 1 x i64>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg2_nxv2i32_nxv1i64(i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv1i64(i32* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlxseg2_mask_nxv2i32_nxv1i64(i32* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv1i64(i32* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv1i64(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv1i32(i32*, <vscale x 1 x i32>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg2_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlxseg2_mask_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv8i16(i32*, <vscale x 8 x i16>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg2_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlxseg2_mask_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv4i8(i32*, <vscale x 4 x i8>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg2_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlxseg2_mask_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv1i16(i32*, <vscale x 1 x i16>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg2_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlxseg2_mask_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.nxv2i32.nxv2i32(i32*, <vscale x 2 x i32>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg2_nxv2i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, 
e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv2i32( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv8i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv8i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv8i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv4i64(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv4i64(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv4i64( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} 
@llvm.riscv.vlxseg2.nxv2i32.nxv64i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv64i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv64i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv4i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv4i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv4i16( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv8i64(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv8i64(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: 
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv8i64( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv1i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv1i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv1i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv2i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv2i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv8i32(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv8i32(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv8i32( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv32i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv32i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv32i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv16i32(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv16i32(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv16i32( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv2i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 
= tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv2i16( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i32.nxv2i64(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv2i64(,, i32*, , , i64) + +define @test_vlxseg2_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i32.nxv2i64( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv16i16(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv16i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv16i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv32i16(i32*, , i64) +declare {,,} 
@llvm.riscv.vlxseg3.mask.nxv2i32.nxv32i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv32i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv4i32(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv4i32(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv4i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv16i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv16i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei8.v 
v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv16i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv1i64(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv1i64(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv1i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv1i32(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv1i32(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv1i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv8i16(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv8i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) + 
%1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv8i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv4i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv4i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv4i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv1i16(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv1i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv1i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv2i32(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i64) + 
+define @test_vlxseg3_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv2i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv8i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv8i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv8i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv4i64(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv4i64(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + 
%0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv4i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv64i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv64i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv64i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv4i16(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv4i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv4i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv8i64(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv8i64(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg3_mask_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv8i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv1i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv1i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv1i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv2i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv2i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv8i32(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv8i32(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv8i32(i32* %base, %index, i64 
%vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv8i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv32i8(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv32i8(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv32i8( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv16i32(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv16i32(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} 
@llvm.riscv.vlxseg3.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv16i32( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv2i16(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv2i16( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv2i64(i32*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv2i64(,,, i32*, , , i64) + +define @test_vlxseg3_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i32.nxv2i64( %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv16i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv16i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg4_mask_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv32i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv32i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv4i32(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv4i32(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} 
@llvm.riscv.vlxseg4.nxv2i32.nxv16i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv16i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv1i64(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv1i64(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv1i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv1i32(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv1i32(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: 
+
+define <vscale x 2 x i32> @test_vlxseg4_mask_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv8i16(i32*, <vscale x 8 x i16>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg4_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlxseg4_mask_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv4i8(i32*, <vscale x 4 x i8>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg4_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlxseg4_mask_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv1i16(i32*, <vscale x 1 x i16>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg4_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlxseg4_mask_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv2i32(i32*, <vscale x 2 x i32>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg4_nxv2i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlxseg4_mask_nxv2i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv8i8(i32*, <vscale x 8 x i8>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg4_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlxseg4_mask_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv4i64(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv4i64(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv4i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv64i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv64i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv4i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv4i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} 
@llvm.riscv.vlxseg4.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv8i64(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv8i64(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv8i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv1i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv1i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, i32* %base, %index, 
%mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv8i32(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv8i32(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv32i8(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv32i8(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv16i32(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv16i32(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i64) + +define @test_vlxseg4_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv2i64(,,,, i32*, , , i64) + +define 
@test_vlxseg4_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i32.nxv2i64( %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv16i16(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv16i16(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv32i16(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv32i16(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv4i32(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv4i32(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv16i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv16i8(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv1i64(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv1i64(,,,,, i32*, , , i64) + +define 
@test_vlxseg5_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv1i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv1i32(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv1i32(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv1i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv8i16(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv8i16(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; 
CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv8i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv4i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv4i8(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv4i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv1i16(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv1i16(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv1i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv2i32(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i64) + +define 
@test_vlxseg5_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv2i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv8i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv8i8(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv4i64(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv4i64(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: 
vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv4i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv64i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv64i8(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv4i16(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv4i16(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv8i64(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv8i64(,,,,, i32*, , , i64) + +define 
@test_vlxseg5_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv8i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv1i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv1i8(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 
+; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv8i32(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv8i32(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv32i8(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv32i8(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv16i32(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv16i32(,,,,, i32*, , , i64) + +define 
@test_vlxseg5_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv2i64(,,,,, i32*, , , i64) + +define @test_vlxseg5_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; 
CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i32.nxv2i64( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv16i16(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv16i16(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv32i16(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv32i16(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} 
@llvm.riscv.vlxseg6.nxv2i32.nxv4i32(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv4i32(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv16i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv16i8(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv1i64(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv1i64(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define 
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.nxv2i32.nxv1i64(i32* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv1i64(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.nxv2i32.nxv1i32(i32*, <vscale x 1 x i32>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg6_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlxseg6_mask_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.nxv2i32.nxv8i16(i32*, <vscale x 8 x i16>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg6_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlxseg6_mask_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.nxv2i32.nxv4i8(i32*, <vscale x 4 x i8>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg6_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlxseg6_mask_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.nxv2i32.nxv1i16(i32*, <vscale x 1 x i16>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg6_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlxseg6_mask_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, <vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.nxv2i32.nxv2i32(i32*, <vscale x 2 x i32>, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlxseg6_nxv2i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16
killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv8i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv8i8(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv4i64(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv4i64(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, 
a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv4i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv64i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv64i8(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv4i16(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv4i16(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv8i64(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv8i64(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv8i64(i32* %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv8i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv1i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv1i8(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: 
vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv8i32(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv8i32(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv32i8(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv32i8(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + 
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv16i32(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv16i32(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv2i64(,,,,,, i32*, , , i64) + +define @test_vlxseg6_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret 
%1 +} + +define @test_vlxseg6_mask_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i32.nxv2i64( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv16i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv16i16(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv32i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv32i16(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: 
vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv4i32(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv4i32(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv16i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv16i8(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv1i64(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv1i64(,,,,,,, i32*, , , i64) + +define 
@test_vlxseg7_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv1i32(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv1i32(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv8i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv8i16(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv8i16(i32* %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv4i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv4i8(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv1i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv1i16(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail 
call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv2i32(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv8i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv8i8(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv4i64(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv4i64(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vlxseg7_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv64i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv64i8(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv4i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv4i16(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv4i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv8i64(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv8i64(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv1i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv1i8(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) + 
%1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv8i32(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv8i32(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv32i8(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv32i8(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv16i32(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv16i32(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; 
CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv2i64(,,,,,,, i32*, , , i64) + +define @test_vlxseg7_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i32.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv16i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv16i16(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = 
extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv32i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv32i16(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv4i32(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv4i32(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv4i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv4i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv16i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv16i8(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv16i8(i32* %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv16i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv16i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv1i64(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv1i64(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv1i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv1i32(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv1i32(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + 
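In these tests the struct and operand types follow directly from the intrinsic name suffix (data type first, index type second): `vlxseg8.nxv2i32.nxv1i32` returns eight `<vscale x 2 x i32>` values, takes an `<vscale x 1 x i32>` index vector, and its masked form additionally takes a `<vscale x 2 x i1>` mask. A minimal fully typed sketch of the unmasked nxv2i32/nxv1i32 case above (the `sketch_` function name is illustrative; the declaration and call shape follow the intrinsic definition):

; Sketch: the vlxseg8 intrinsic with its scalable-vector types written out.
declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg8.nxv2i32.nxv1i32(i32*, <vscale x 1 x i32>, i64)

define <vscale x 2 x i32> @sketch_vlxseg8_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
entry:
  ; One indexed segment load produces all eight segment fields as a struct.
  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlxseg8.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i64 %vl)
  ; Keep only field 1, as the surrounding tests do.
  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
  ret <vscale x 2 x i32> %1
}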
+define @test_vlxseg8_mask_nxv2i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv8i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv8i16(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv4i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv4i8(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: 
vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv1i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv1i16(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv2i32(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 
= extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv8i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv8i8(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv4i64(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv4i64(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv64i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv64i8(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: 
# kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv4i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv4i16(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv8i64(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv8i64(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, 
e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv1i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv1i8(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv2i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} 
@llvm.riscv.vlxseg8.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv8i32(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv8i32(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv32i8(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv32i8(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv32i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv32i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv16i32(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv16i32(,,,,,,,, i32*, , , 
i64) + +define @test_vlxseg8_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv16i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv16i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv2i16(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv2i64(i32*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv2i64(,,,,,,,, i32*, , , i64) + +define @test_vlxseg8_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv2i64(i32* %base, 
%index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i32.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv16i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv16i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv32i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv32i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv32i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} 
@llvm.riscv.vlxseg2.nxv8i8.nxv4i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv4i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv4i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv16i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv16i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv16i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv1i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv1i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} 
@llvm.riscv.vlxseg2.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv1i64( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv1i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv1i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv1i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv8i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv8i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv4i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv4i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: 
vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv1i16(i8*, <vscale x 1 x i16>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg2_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlxseg2_mask_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv2i32(i8*, <vscale x 2 x i32>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg2_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlxseg2_mask_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv8i8(i8*, <vscale x 8 x i8>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg2_nxv8i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
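The ei8/ei16/ei32/ei64 suffix in the expected assembly is chosen by the index vector's element width, while the data SEW (e8 here) comes from vsetvli. A small sketch contrasting two index widths for the same nxv8i8 data type (the `sketch_` name is illustrative; both intrinsics are exercised by the tests in this file):

; Sketch: same data type, different index EEW; only the ei* suffix changes.
declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.sketch.nxv8i8.nxv8i8(i8*, <vscale x 8 x i8>, i64)
declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.sketch.nxv8i8.nxv2i64(i8*, <vscale x 2 x i64>, i64)

define <vscale x 8 x i8> @sketch_index_eew(i8* %base, <vscale x 8 x i8> %idx8, <vscale x 2 x i64> %idx64, i64 %vl) {
entry:
  ; Selects vlxseg2ei8.v: the index elements are 8 bits wide.
  %a = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.sketch.nxv8i8.nxv8i8(i8* %base, <vscale x 8 x i8> %idx8, i64 %vl)
  ; Selects vlxseg2ei64.v: the index elements are 64 bits wide.
  %b = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.sketch.nxv8i8.nxv2i64(i8* %base, <vscale x 2 x i64> %idx64, i64 %vl)
  %a1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %a, 1
  %b1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %b, 1
  %sum = add <vscale x 8 x i8> %a1, %b1
  ret <vscale x 8 x i8> %sum
}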
+define <vscale x 8 x i8> @test_vlxseg2_mask_nxv8i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv4i64(i8*, <vscale x 4 x i64>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg2_nxv8i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlxseg2_mask_nxv8i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv4i64(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv64i8(i8*, <vscale x 64 x i8>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg2_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlxseg2_mask_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.nxv8i8.nxv4i16(i8*, <vscale x 4 x i16>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg2_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0),
v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv8i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv8i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv8i64( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv1i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv1i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv2i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv2i8(,, i8*, , , i64) + +define 
@test_vlxseg2_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv2i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv8i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv32i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv32i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv32i8( %1, %1, i8* 
%base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv16i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv16i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv16i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv2i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv2i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i8.nxv2i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i8.nxv2i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; 
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg3.nxv8i8.nxv16i16(i8*, <vscale x 16 x i16>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg3.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg3_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv8i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg3.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlxseg3_mask_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv8i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg3.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg3.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+; Analogous vlxseg3 declare/unmasked/masked triples follow for index types
+; nxv32i16, nxv4i32, nxv16i8, nxv1i64, nxv1i32, nxv8i16, nxv4i8, nxv1i16,
+; nxv2i32, nxv8i8, nxv4i64, nxv64i8, nxv4i16, nxv8i64, nxv1i8, nxv2i8,
+; nxv8i32, nxv32i8, nxv16i32, nxv2i16, and nxv2i64.
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg4.nxv8i8.nxv16i16(i8*, <vscale x 16 x i16>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg4.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg4_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv8i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg4.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlxseg4_mask_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv8i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg4.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg4.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+; Analogous vlxseg4 declare/unmasked/masked triples follow for the same 21
+; index types listed above for vlxseg3.
+
CHECK-LABEL: test_vlxseg4_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8i8.nxv2i64(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8i8.nxv2i64(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8i8.nxv2i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv16i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv16i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv16i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv32i16(i8*, , i64) +declare {,,,,} 
+
+define <vscale x 8 x i8> @test_vlxseg5_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlxseg5_mask_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv4i32(i8*, <vscale x 4 x i32>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg5_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlxseg5_mask_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv16i8(i8*, <vscale x 16 x i8>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg5_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlxseg5_mask_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv1i64(i8*, <vscale x 1 x i64>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg5_nxv8i8_nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlxseg5_mask_nxv8i8_nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv1i64(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv1i32(i8*, <vscale x 1 x i32>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg5_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlxseg5_mask_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv8i16(i8*, <vscale x 8 x i16>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg5_nxv8i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlxseg5_mask_nxv8i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv4i8(i8*, <vscale x 4 x i8>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg5_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlxseg5_mask_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, <vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv1i16(i8*, <vscale x 1 x i16>, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlxseg5_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlxseg5.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlxseg5_mask_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+;
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv1i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv2i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv2i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv2i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv8i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv4i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv4i64(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv4i64: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv4i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv64i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv64i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv64i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv4i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv4i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; 
CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv4i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv8i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv8i64(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv8i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv1i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv1i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv2i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv2i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; 
CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv8i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv8i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv32i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv32i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call 
{,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv32i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv16i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv16i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv16i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv2i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv2i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv2i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv2i64(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = 
tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv8i8.nxv2i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv16i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv16i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv16i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv32i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv32i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = 
tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv32i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv4i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv4i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv4i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv16i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv16i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv16i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv1i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv1i64(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv1i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv1i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv1i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv1i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv8i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, 
a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv8i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv4i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv4i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv1i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv1i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv1i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv2i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv2i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vlxseg6_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv2i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv8i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv4i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv4i64(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: 
vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv4i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv64i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv64i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv64i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv4i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv4i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv4i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv8i64(i8*, , i64) +declare {,,,,,} 
@llvm.riscv.vlxseg6.mask.nxv8i8.nxv8i64(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv8i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv1i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv1i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv2i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv2i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv8i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv8i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv32i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv32i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv32i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue 
{,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv16i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv16i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv16i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv2i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv2i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv2i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv2i64(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg6_mask_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv8i8.nxv2i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv16i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv16i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv32i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv32i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call 
{,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv4i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv4i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv16i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv16i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv1i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv1i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv1i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv1i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv8i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: 
vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv4i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv4i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv1i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv1i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, 
i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv2i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv2i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv8i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv4i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv4i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail 
call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv64i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv64i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv4i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv4i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: 
vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv8i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv8i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv1i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv1i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv2i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv2i8(,,,,,,, i8*, , , 
i64) + +define @test_vlxseg7_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv8i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv32i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv32i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg7_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv16i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv16i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv2i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv2i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv2i16(i8* %base, %index, 
i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv2i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv2i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv8i8.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv16i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv16i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv32i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv32i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv32i16: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv4i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv4i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv16i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv16i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg8_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv1i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv1i64(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv1i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv1i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v 
v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv8i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv4i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv4i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv1i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv1i16(,,,,,,,, 
i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv2i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv2i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv8i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} 
%0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv4i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv4i64(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv64i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv64i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv4i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv4i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv8i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv8i64(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare 
{,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv1i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv1i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv2i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv2i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv8i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail 
call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv32i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv32i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv16i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv16i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; 
CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv2i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv2i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv2i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv2i64(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv8i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv8i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv8i8.nxv2i64( %1, %1, 
%1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv16i16(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv16i16(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv16i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv16i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv16i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv16i16( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv32i16(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv32i16(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv32i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv32i16( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv4i32(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv4i32(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv4i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: 
vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv4i32( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv16i8(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv16i8(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv16i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv16i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv16i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv16i8( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv1i64(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv1i64(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv1i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv1i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv1i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv1i64( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv1i32(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv1i32(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv1i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv1i32(i64* %base, %index, i64 %vl) + 
%1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv1i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv1i32( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv8i16(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv8i16(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv8i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv8i16( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv4i8(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv4i8(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv4i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv4i8( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv1i16(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv1i16(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv1i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv1i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv1i16( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv2i32(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv2i32(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv2i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv2i32( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv8i8(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv8i8(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv8i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv8i8( %1, %1, i64* %base, %index, %mask, i64 %vl) + 
%3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv4i64(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv4i64(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv4i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv4i64( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv64i8(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv64i8(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv64i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv64i8( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv4i16(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv4i16(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv4i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v 
v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv4i16( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv8i64(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv8i64(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv8i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv8i64( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv1i8(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv1i8(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv1i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv1i8( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv2i8(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv2i8(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv2i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv2i8(i64* %base, %index, 
i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv2i8( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv8i32(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv8i32(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv8i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv8i32( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv32i8(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv32i8(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv32i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv32i8( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv16i32(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv16i32(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv16i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v 
v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv16i32( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv2i16(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv2i16(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv2i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv2i16( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i64.nxv2i64(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv2i64(,, i64*, , , i64) + +define @test_vlxseg2_nxv4i64_nxv2i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i64.nxv2i64( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} 
@llvm.riscv.vlxseg2.nxv4i16.nxv16i16(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv16i16(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv32i16(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv32i16(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv32i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv4i32(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv4i32( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv16i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv16i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv16i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv1i64(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv1i64(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv1i64( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv1i32(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv1i32(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg2_mask_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv1i32( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv8i16(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv8i16(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv8i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv4i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv4i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv1i16(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv1i16(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 
killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv1i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv2i32(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv2i32(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv2i32( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv8i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv8i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv8i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv4i64(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv4i64(,, i16*, , , i64) + 
+define <vscale x 4 x i16> @test_vlxseg2_nxv4i16_nxv4i64(i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg2.nxv4i16.nxv4i64(i16* %base, <vscale x 4 x i64> %index, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg2_mask_nxv4i16_nxv4i64(i16* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg2.nxv4i16.nxv4i64(i16* %base, <vscale x 4 x i64> %index, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg2.nxv4i16.nxv64i8(i16*, <vscale x 64 x i8>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg2_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg2.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg2_mask_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg2.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg2.nxv4i16.nxv4i16(i16*, <vscale x 4 x i16>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg2_nxv4i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg2.nxv4i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg2_mask_nxv4i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg2.nxv4i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 =
tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv4i16( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv8i64(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv8i64(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv8i64( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv1i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv1i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv1i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv2i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv2i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv2i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv8i32(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv8i32(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv8i32( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv32i8(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv32i8(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv32i8( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv16i32(i16*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv16i32(,, i16*, , , i64) + +define @test_vlxseg2_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} 
+
+define @test_vlxseg2_mask_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,} %0, 0
+  %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv16i32( %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,} %2, 1
+  ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv2i16(i16*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv2i16(,, i16*, , , i64)
+
+define @test_vlxseg2_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg2_mask_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,} %0, 0
+  %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv2i16( %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,} %2, 1
+  ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv4i16.nxv2i64(i16*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv2i64(,, i16*, , , i64)
+
+define @test_vlxseg2_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg2_mask_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,} %0, 0
+  %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4i16.nxv2i64( %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv16i16(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv16i16(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv16i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv32i16(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv32i16(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv32i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv4i32(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv4i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv16i8(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv16i8(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv16i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv1i64(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv1i64(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv1i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv1i32(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv1i32(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv1i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv8i16(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv8i16(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv8i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv4i8(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv4i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv1i16(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv1i16(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv1i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv2i32(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv2i32(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv2i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv8i8(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv8i8(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv8i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv4i64(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv4i64(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv4i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv64i8(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv64i8(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv64i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv4i16(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv4i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv8i64(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv8i64(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv8i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv1i8(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv1i8(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv1i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv2i8(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv2i8(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv2i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv8i32(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv8i32(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv8i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv32i8(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv32i8(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv32i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
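+; The vlxseg3 tests repeat the same pattern for every index type; only the
+; eiXX suffix of the emitted instruction tracks the index element width,
+; while the e16,m1 vtype of the segment data stays fixed.
+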
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv16i32(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv16i32(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv16i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv2i16(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv2i16(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv2i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv2i64(i16*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv2i64(,,, i16*, , , i64)
+
+define @test_vlxseg3_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg3_mask_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,} %0, 0
+  %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4i16.nxv2i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv16i16(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv16i16(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv16i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv32i16(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv32i16(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv32i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv4i32(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv4i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv16i8(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv16i8(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv16i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv1i64(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv1i64(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv1i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv1i32(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv1i32(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv1i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv8i16(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv8i16(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv8i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv4i8(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv4i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv1i16(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv1i16(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv2i32(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv2i32(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv8i8(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv8i8(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv4i64(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv4i64(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv4i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv64i8(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv64i8(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv4i16(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv4i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv8i64(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv8i64(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv8i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv1i8(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv1i8(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv1i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv2i8(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv2i8(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv2i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv8i32(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv8i32(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv8i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv32i8(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv32i8(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv32i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv16i32(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv16i32(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg4_mask_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,} %0, 0
+  %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv16i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,} %2, 1
+  ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv4i16.nxv2i16(i16*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv2i16(,,,, i16*, , , i64)
+
+define @test_vlxseg4_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg4_mask_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg4.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg4.nxv4i16.nxv2i64(i16*, <vscale x 2 x i64>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg4_nxv4i16_nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg4.nxv4i16.nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg4_mask_nxv4i16_nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg4.nxv4i16.nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg4.mask.nxv4i16.nxv2i64(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv16i16(i16*, <vscale x 16 x i16>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg5_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg5_mask_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv32i16(i16*, <vscale x 32 x i16>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg5_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg5_mask_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv4i32(i16*, <vscale x 4 x i32>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg5_nxv4i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg5_mask_nxv4i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv16i8(i16*, <vscale x 16 x i8>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg5_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg5_mask_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv1i64(i16*, <vscale x 1 x i64>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg5_nxv4i16_nxv1i64(i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv1i64(i16* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg5_mask_nxv4i16_nxv1i64(i16* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv1i64(i16* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv1i64(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv1i32(i16*, <vscale x 1 x i32>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg5_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg5_mask_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv8i16(i16*, <vscale x 8 x i16>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg5_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg5_mask_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv4i8(i16*, <vscale x 4 x i8>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg5_nxv4i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg5_mask_nxv4i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, <vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv1i16(i16*, <vscale x 1 x i16>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg5_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg5.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> 
@test_vlxseg5_mask_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv2i32(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv2i32(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv8i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv8i8(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, %1, i16* 
%base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv4i64(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv4i64(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv4i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv64i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv64i8(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv4i16(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg5_mask_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv8i64(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv8i64(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv8i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv1i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv1i8(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv1i8( %1, %1, %1, %1, %1, i16* 
%base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv2i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv2i8(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv2i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv8i32(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv8i32(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv8i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv32i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv32i8(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg5_mask_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv32i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv16i32(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv16i32(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv16i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv2i16(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv2i16(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv2i16( %1, %1, 
%1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv2i64(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv2i64(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4i16.nxv2i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv16i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv16i16(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv16i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv32i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv32i16(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv32i16(i16* %base, 
%index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv32i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv4i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv4i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv16i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv16i8(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = 
tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv16i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv1i64(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv1i64(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv1i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv1i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv1i32(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv1i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv8i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv8i16(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv8i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv4i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv4i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv1i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv1i16(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, 
v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv2i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv2i32(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv8i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv8i8(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv4i64(i16*, , i64) +declare {,,,,,} 
@llvm.riscv.vlxseg6.mask.nxv4i16.nxv4i64(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv4i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv64i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv64i8(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv4i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv8i64(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv8i64(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv8i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv1i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv1i8(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} 
@llvm.riscv.vlxseg6.mask.nxv4i16.nxv1i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv2i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv2i8(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv2i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv8i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv8i32(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv8i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv32i8(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv32i8(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + 
%0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv32i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv16i32(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv16i32(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv16i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv2i16(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv2i16(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, 
(a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv2i64(i16*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv2i64(,,,,,, i16*, , , i64) + +define @test_vlxseg6_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4i16.nxv2i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv16i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv16i16(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv32i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv32i16(,,,,,,, i16*, , , i64) + +define 
@test_vlxseg7_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv4i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv16i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv16i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv16i8(i16* 
%base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv1i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv1i64(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv1i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv1i32(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret 
+entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv8i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv8i16(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv4i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv1i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv1i16(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vlxseg7_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv2i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv2i32(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv8i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv8i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg7_mask_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv4i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv4i64(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv64i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv64i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} 
@llvm.riscv.vlxseg7.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv4i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv8i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv8i64(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv1i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv1i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vlxseg7_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv2i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv2i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv8i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv8i32(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv32i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv32i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv16i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv16i32(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 
= extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv2i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv2i16(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv2i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv2i64(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4i16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv16i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv16i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv32i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv32i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv4i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { 
+; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv16i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv16i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv1i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv1i64(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: 
vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv1i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv1i32(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv8i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv8i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} 
@llvm.riscv.vlxseg8.nxv4i16.nxv4i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv1i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv1i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv2i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv2i32(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv8i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv8i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv4i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv4i64(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), 
v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv64i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv64i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv4i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4i16.nxv4i16(i16* 
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv8i64(i16*, <vscale x 8 x i64>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg8_nxv4i16_nxv8i64(i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv8i64(i16* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg8_mask_nxv4i16_nxv8i64(i16* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv8i64(i16* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv8i64(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv1i8(i16*, <vscale x 1 x i8>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg8_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg8_mask_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv2i8(i16*, <vscale x 2 x i8>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg8_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg8_mask_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv8i32(i16*, <vscale x 8 x i32>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg8_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg8_mask_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv32i8(i16*, <vscale x 32 x i8>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg8_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg8_mask_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv16i32(i16*, <vscale x 16 x i32>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg8_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg8_mask_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv2i16(i16*, <vscale x 2 x i16>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg8_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg8_mask_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv2i64(i16*, <vscale x 2 x i64>, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlxseg8_nxv4i16_nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlxseg8_mask_nxv4i16_nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.nxv4i16.nxv2i64(i16* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlxseg8.mask.nxv4i16.nxv2i64(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg2.nxv1i8.nxv16i16(i8*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg2_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg2.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg2_mask_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg2.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg2.nxv1i8.nxv32i16(i8*, <vscale x 32 x i16>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg2_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL:
test_vlxseg2_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv32i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv4i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv4i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv4i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv16i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv16i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv16i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue 
{,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv1i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv1i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv1i64( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv1i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv1i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv1i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv8i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv8i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 
= tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv8i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv4i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv4i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv4i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv1i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv1i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv2i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv2i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; 
CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv2i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv8i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv8i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv8i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv4i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv4i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv4i64( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv64i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv64i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} 
%0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv64i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv4i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv4i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv8i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv8i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv8i64( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv1i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv1i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; 
CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv2i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv2i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv2i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv8i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv8i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv32i8(i8*, , i64) +declare {,} 
@llvm.riscv.vlxseg2.mask.nxv1i8.nxv32i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv32i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv16i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv16i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv16i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv2i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv2i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = 
extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1i8.nxv2i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv2i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1i8.nxv2i64( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv16i16(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv16i16(,,, i8*, , , i64) + +define @test_vlxseg3_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv16i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv32i16(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv32i16(,,, i8*, , , i64) + +define @test_vlxseg3_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; 
CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv32i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv4i32(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv4i32(,,, i8*, , , i64) + +define @test_vlxseg3_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv4i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv16i8(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv16i8(,,, i8*, , , i64) + +define @test_vlxseg3_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv16i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv1i64(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv1i64(,,, i8*, , , i64) + +define @test_vlxseg3_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv1i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv1i32(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv1i32(,,, i8*, , , i64) + +define @test_vlxseg3_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv1i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv8i16(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv8i16(,,, i8*, , , i64) + +define @test_vlxseg3_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv8i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} 
@llvm.riscv.vlxseg3.nxv1i8.nxv4i8(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv4i8(,,, i8*, , , i64) + +define @test_vlxseg3_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv4i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv1i16(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv1i16(,,, i8*, , , i64) + +define @test_vlxseg3_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv1i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv2i32(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv2i32(,,, i8*, , , i64) + +define @test_vlxseg3_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg3ei32.v 
v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv8i8(i8*, <vscale x 8 x i8>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg3_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg3_mask_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv4i64(i8*, <vscale x 4 x i64>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg3_nxv1i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg3_mask_nxv1i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv4i64(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv64i8(i8*, <vscale x 64 x i8>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg3_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg3_mask_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv4i16(i8*, <vscale x 4 x i16>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg3_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg3_mask_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv8i64(i8*, <vscale x 8 x i64>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg3_nxv1i8_nxv8i64(i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv8i64(i8* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg3_mask_nxv1i8_nxv8i64(i8* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv8i64(i8* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv8i64(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv1i8(i8*, <vscale x 1 x i8>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg3_nxv1i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg3_mask_nxv1i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv2i8(i8*, <vscale x 2 x i8>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg3_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg3_mask_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv8i32(i8*, <vscale x 8 x i32>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg3_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg3_mask_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv32i8(i8*, <vscale x 32 x i8>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg3_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg3_mask_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv16i32(i8*, <vscale x 16 x i32>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg3_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg3_mask_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv2i16(i8*, <vscale x 2 x i16>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg3_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg3_mask_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv2i64(i8*, <vscale x 2 x i64>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg3_nxv1i8_nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg3_mask_nxv1i8_nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.nxv1i8.nxv2i64(i8* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg3.mask.nxv1i8.nxv2i64(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv16i16(i8*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg4_mask_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv32i16(i8*, <vscale x 32 x i16>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg4_mask_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv4i32(i8*, <vscale x 4 x i32>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg4_mask_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv16i8(i8*, <vscale x 16 x i8>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg4_mask_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv1i64(i8*, <vscale x 1 x i64>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg4_mask_nxv1i8_nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv1i32(i8*, <vscale x 1 x i32>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg4_mask_nxv1i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv8i16(i8*, <vscale x 8 x i16>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg4_mask_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv4i8(i8*, <vscale x 4 x i8>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg4_mask_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv1i16(i8*, <vscale x 1 x i16>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg4_mask_nxv1i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv2i32(i8*, <vscale x 2 x i32>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg4_mask_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv8i8(i8*, <vscale x 8 x i8>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg4_mask_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv4i64(i8*, <vscale x 4 x i64>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg4_mask_nxv1i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv4i64(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv64i8(i8*, <vscale x 64 x i8>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg4_mask_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv4i16(i8*, <vscale x 4 x i16>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg4_mask_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.nxv1i8.nxv8i64(i8*, <vscale x 8 x i64>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg4_nxv1i8_nxv8i64(i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail
call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv8i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv1i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv2i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv2i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv2i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} 
%2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv8i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv8i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv8i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv32i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv32i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv32i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv16i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv16i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: 
vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv16i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv2i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv2i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv2i64(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv2i64(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1i8.nxv2i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv16i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv16i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv16i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv32i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv32i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv32i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv4i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv4i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; 
CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv4i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv16i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv16i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv16i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv1i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv1i64(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv1i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv1i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, 
(a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv1i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv8i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv8i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv8i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv4i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv4i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret 
+entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv1i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv1i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv2i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv2i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv2i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv8i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv8i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: 
ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv4i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv4i64(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv4i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv64i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv64i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = 
extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv64i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv4i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv4i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv4i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv8i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv8i64(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv8i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg5_mask_nxv1i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv2i8(i8*, <vscale x 2 x i8>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg5_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg5_mask_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv8i32(i8*, <vscale x 8 x i32>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg5_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg5_mask_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv32i8(i8*, <vscale x 32 x i8>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg5_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg5_mask_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv16i32(i8*, <vscale x 16 x i32>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg5_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg5_mask_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv2i16(i8*, <vscale x 2 x i16>, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlxseg5_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlxseg5.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlxseg5_mask_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16>
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv2i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv2i64(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1i8.nxv2i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv16i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv16i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv16i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, 
%mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv32i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv32i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv32i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv4i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv4i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv4i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv16i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv16i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = 
extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv16i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv1i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv1i64(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv1i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv1i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv1i32(i8* 
%base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv1i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv8i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv8i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv8i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv4i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv4i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv1i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv1i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv2i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv2i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv2i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv8i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv8i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, 
(a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv4i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv4i64(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv4i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv64i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv64i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv64i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv4i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv4i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv4i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv8i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv8i64(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv8i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; 
CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv2i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv2i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv8i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv8i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv8i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv32i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv32i8(,,,,,, i8*, , , i64) + +define 
@test_vlxseg6_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv32i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv16i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv16i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv16i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv2i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv2i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, 
e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv2i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv2i64(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1i8.nxv2i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv16i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv16i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, 
i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv32i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv32i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv4i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv4i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv16i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv16i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = 
tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv1i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv1i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv1i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv8i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv8i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv4i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv4i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv1i16(i8*, , i64) +declare {,,,,,,} 
@llvm.riscv.vlxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv2i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv2i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv8i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv8i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg7_mask_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv4i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv4i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv64i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv64i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: 
+ %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv4i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv4i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv8i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv8i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv1i8: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv2i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv2i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv8i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv8i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: 
vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv32i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv32i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv16i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv16i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv16i32( 
%1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv2i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv2i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv2i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv2i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1i8.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv16i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv16i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv32i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv32i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv4i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv4i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; 
CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv16i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv16i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv1i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv1i64(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue 
{,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv1i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv8i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv8i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv4i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv4i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv4i8: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv1i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv2i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv2i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg8_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv8i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv8i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv4i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv4i64(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v 
v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv64i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv64i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv4i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv4i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv8i64(i8*, , i64) +declare {,,,,,,,} 
@llvm.riscv.vlxseg8.mask.nxv1i8.nxv8i64(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv1i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv2i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv2i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv2i8(i8* %base, 
%index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv8i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv8i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv32i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv32i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; 
CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv16i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv16i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv2i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv2i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, 
%mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv2i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv2i64(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1i8.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i8.nxv16i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv16i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i8.nxv32i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv32i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv4i32(i8*, <vscale x 4 x i32>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg2_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg2_mask_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv16i8(i8*, <vscale x 16 x i8>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg2_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg2_mask_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv1i64(i8*, <vscale x 1 x i64>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg2_nxv2i8_nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg2_mask_nxv2i8_nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv1i64(i8* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv1i64(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv1i32(i8*, <vscale x 1 x i32>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg2_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg2_mask_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv8i16(i8*, <vscale x 8 x i16>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg2_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg2_mask_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv4i8(i8*, <vscale x 4 x i8>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg2_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg2_mask_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv1i16(i8*, <vscale x 1 x i16>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg2_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg2_mask_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv2i32(i8*, <vscale x 2 x i32>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg2_nxv2i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg2_mask_nxv2i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.nxv2i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i8.nxv8i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv8i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv8i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i8.nxv4i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv4i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv4i64( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i8.nxv64i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv64i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = 
tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv64i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i8.nxv4i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv4i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i8.nxv8i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv8i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv8i64( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i8.nxv1i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv1i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; 
CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i8.nxv2i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv2i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i8.nxv8i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv8i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i8.nxv32i8(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv32i8(,, i8*, , , i64) + +define @test_vlxseg2_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 
+ ret %1 +} + +define @test_vlxseg2_mask_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv32i8( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i8.nxv16i32(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv16i32(,, i8*, , , i64) + +define @test_vlxseg2_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv16i32( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i8.nxv2i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i8.nxv2i64(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv2i64(,, i8*, , , i64) + +define @test_vlxseg2_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; 
CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i8.nxv2i64( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv16i16(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv16i16(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv16i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv32i16(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv32i16(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv32i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = 
extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv4i32(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv4i32(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv4i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv16i8(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv16i8(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv16i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv1i64(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv1i64(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv1i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv1i32(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv1i32(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv1i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv8i16(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv8i16(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv8i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv4i8(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv4i8(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv4i8(i8* %base, 
%index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv4i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv1i16(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv1i16(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv1i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv2i32(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv2i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv8i8(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv8i8(,,, i8*, , , i64) + +define 
@test_vlxseg3_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv8i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv4i64(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv4i64(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv4i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv64i8(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv64i8(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} 
@llvm.riscv.vlxseg3.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv64i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv4i16(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv4i16(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv4i16( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv8i64(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv8i64(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv8i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv1i8(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv1i8(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg3_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv1i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv2i8(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv2i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv8i32(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv8i32(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv8i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv32i8(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv32i8(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu 
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv32i8( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv16i32(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv16i32(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv16i32( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv2i16(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv2i16( %1, %1, %1, i8* %base, 
%index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv2i64(i8*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv2i64(,,, i8*, , , i64) + +define @test_vlxseg3_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i8.nxv2i64( %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv16i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv16i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv32i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv32i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; 
CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv4i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv4i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv16i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv16i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv1i64(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv1i64(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv1i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv1i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv1i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv1i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv8i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv8i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} 
@llvm.riscv.vlxseg4.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv8i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv4i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv4i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv4i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv1i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv1i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv1i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv2i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + 
+define @test_vlxseg4_mask_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv2i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv8i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv8i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv8i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv4i64(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv4i64(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv4i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv64i8(i8*, , i64) +declare {,,,} 
@llvm.riscv.vlxseg4.mask.nxv2i8.nxv64i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv64i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv4i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv4i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv8i64(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv8i64(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v 
v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv8i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv1i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv1i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv8i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv8i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv32i8(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv32i8(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv16i32(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv16i32(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} 
@llvm.riscv.vlxseg4.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv2i64(,,,, i8*, , , i64) + +define @test_vlxseg4_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i8.nxv2i64( %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv16i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv16i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv32i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv32i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv4i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv4i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare 
{,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv16i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv16i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv1i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv1i64(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv1i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv1i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv1i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv1i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv8i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv8i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv8i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv4i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv4i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv1i16(i8*, , i64) +declare {,,,,} 
@llvm.riscv.vlxseg5.mask.nxv2i8.nxv1i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv1i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv2i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv2i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv8i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv8i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v 
v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv4i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv4i64(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv4i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv64i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv64i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv64i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv4i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv4i16(,,,,, i8*, , , i64) + +define 
@test_vlxseg5_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv8i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv8i64(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv8i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv1i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv1i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, 
v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv8i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv8i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv32i8(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv32i8(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv32i8: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv16i32(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv16i32(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv2i64(,,,,, i8*, , , i64) + +define @test_vlxseg5_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i8.nxv2i64( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv16i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv16i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv32i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv32i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv4i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv4i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv16i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv16i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv1i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv1i64(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv1i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv1i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv1i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv1i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv8i16(i8*, , i64) +declare {,,,,,} 
@llvm.riscv.vlxseg6.mask.nxv2i8.nxv8i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv8i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv4i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv4i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv1i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv1i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv1i16: +; 
CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv2i32(i8*, <vscale x 2 x i32>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg6_nxv2i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg6_mask_nxv2i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv8i8(i8*, <vscale x 8 x i8>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg6_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg6_mask_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv4i64(i8*, <vscale x 4 x i64>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg6_nxv2i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg6_mask_nxv2i8_nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv4i64(i8* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv4i64(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv64i8(i8*, <vscale x 64 x i8>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg6_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg6_mask_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv4i16(i8*, <vscale x 4 x i16>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg6_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg6.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
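+; Note on the masked tests in this file: each one first runs the plain
+; vlxseg to produce segment 0, then passes that single value as every merge
+; operand of the .mask intrinsic. That is why the expected assembly copies
+; v1 into the remaining segment registers with a chain of vmv1r.v, and why
+; vsetvli is re-executed to switch from tail-agnostic (ta) to
+; tail-undisturbed (tu) before the masked vlxseg.
+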
+define @test_vlxseg6_mask_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv8i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv8i64(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv8i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv1i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv1i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} 
%0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv8i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv8i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv32i8(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv32i8(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail 
call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv16i32(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv16i32(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv2i64(,,,,,, i8*, , , i64) + +define @test_vlxseg6_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i8.nxv2i64( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv16i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv16i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv32i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv32i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv32i16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv4i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv4i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv16i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv16i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; 
CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv1i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv1i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv1i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv1i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv1i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv1i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} 
@llvm.riscv.vlxseg7.mask.nxv2i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv8i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv8i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv8i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv8i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv4i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv4i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv4i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv4i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv1i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv1i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed 
$v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv1i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv1i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv2i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv8i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv8i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, 
v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv4i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv4i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv64i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv64i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} 
@llvm.riscv.vlxseg7.nxv2i8.nxv4i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv4i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv8i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv8i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv1i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv1i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = 
extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv8i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv8i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: 
vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv32i8(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv32i8(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv16i32(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv16i32(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv2i16(i8* %base, %index, i64 
%vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv2i64(,,,,,,, i8*, , , i64) + +define @test_vlxseg7_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i8.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv16i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv16i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv16i16: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv32i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv32i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv4i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv4i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv4i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv4i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv16i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv16i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv16i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv16i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv1i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv1i64(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv1i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv1i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv1i32(i8*, , i64) +declare {,,,,,,,} 
@llvm.riscv.vlxseg8.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg8_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg8_mask_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.nxv2i8.nxv8i16(i8*, <vscale x 8 x i16>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg8_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg8_mask_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.nxv2i8.nxv4i8(i8*, <vscale x 4 x i8>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg8_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg8_mask_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.nxv2i8.nxv1i16(i8*, <vscale x 1 x i16>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg8_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg8_mask_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.nxv2i8.nxv2i32(i8*, <vscale x 2 x i32>, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlxseg8_nxv2i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlxseg8.nxv2i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlxseg8_mask_nxv2i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+;
CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv2i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv8i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv8i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv8i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv8i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv4i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv4i64(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv4i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv4i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = 
extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv64i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv64i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv64i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv64i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv4i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv4i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv4i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv4i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv8i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv8i64(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 
killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv8i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv8i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv1i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv1i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv1i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv1i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv2i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv2i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv8i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv8i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv8i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv8i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv32i8(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv32i8(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv32i8(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv32i8(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call 
{,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv16i32(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv16i32(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv16i32(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv16i32(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv2i16(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv2i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv2i64(i8*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv2i64(,,,,,,,, i8*, , , i64) + +define @test_vlxseg8_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i8.nxv2i64(i8* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i8.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv16i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv16i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv16i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv16i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv16i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv16i16( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv32i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv32i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv32i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv32i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv32i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, 
v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
+  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv32i16(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
+  ret <vscale x 8 x i32> %3
+}
+
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv4i32(i32*, <vscale x 4 x i32>, i64)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv4i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i32> @test_vlxseg2_nxv8i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+  ret <vscale x 8 x i32> %1
+}
+
+define <vscale x 8 x i32> @test_vlxseg2_mask_nxv8i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
+  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv4i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
+  ret <vscale x 8 x i32> %3
+}
+
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv16i8(i32*, <vscale x 16 x i8>, i64)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv16i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i32> @test_vlxseg2_nxv8i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+  ret <vscale x 8 x i32> %1
+}
+
+define <vscale x 8 x i32> @test_vlxseg2_mask_nxv8i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
+  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv16i8(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
+  ret <vscale x 8 x i32> %3
+}
+
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv1i64(i32*, <vscale x 1 x i64>, i64)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv1i64(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i32> @test_vlxseg2_nxv8i32_nxv1i64(i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv1i64(i32* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+  ret <vscale x 8 x i32> %1
+}
+
+define <vscale x 8 x i32> @test_vlxseg2_mask_nxv8i32_nxv1i64(i32* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+;
CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv1i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv1i64( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv1i32(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv1i32(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv1i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv1i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv1i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv1i32( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv8i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv8i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv8i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv8i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv8i16( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv4i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv4i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv4i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; 
CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv4i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv4i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv4i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv1i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv1i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv1i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv1i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv1i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv1i16( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv2i32(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv2i32(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv2i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv2i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv2i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv2i32( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv8i8(i32*, , i64) +declare 
{,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv8i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv8i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv8i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv8i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv4i64(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv4i64(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv4i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv4i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv4i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv4i64( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv64i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv64i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv64i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv64i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} 
@llvm.riscv.vlxseg2.nxv8i32.nxv64i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv64i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv4i16(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv4i16(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv4i16(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv4i16(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv4i16(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv4i16( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv8i64(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv8i64(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv8i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv8i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv8i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv8i64( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv1i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv1i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv1i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv1i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv1i8: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv1i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv1i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv2i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv2i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv2i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv2i8(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv2i8(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv2i8( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv8i32(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv8i32(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv8i32(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv8i32(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv8i32( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8i32.nxv32i8(i32*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv32i8(,, i32*, , , i64) + +define @test_vlxseg2_nxv8i32_nxv32i8(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret 
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+  ret <vscale x 8 x i32> %1
+}
+
+define <vscale x 8 x i32> @test_vlxseg2_mask_nxv8i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
+  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv32i8(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
+  ret <vscale x 8 x i32> %3
+}
+
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv16i32(i32*, <vscale x 16 x i32>, i64)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv16i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i32> @test_vlxseg2_nxv8i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+  ret <vscale x 8 x i32> %1
+}
+
+define <vscale x 8 x i32> @test_vlxseg2_mask_nxv8i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
+  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv16i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
+  ret <vscale x 8 x i32> %3
+}
+
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv2i16(i32*, <vscale x 2 x i16>, i64)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv2i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i32> @test_vlxseg2_nxv8i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+  ret <vscale x 8 x i32> %1
+}
+
+define <vscale x 8 x i32> @test_vlxseg2_mask_nxv8i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
+  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv2i16(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
+  ret <vscale x 8 x i32> %3
+}
+
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.nxv8i32.nxv2i64(i32*, <vscale x 2 x i64>, i64)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv2i64(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
+
+define @test_vlxseg2_nxv8i32_nxv2i64(i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8i32_nxv2i64(i32* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8i32.nxv2i64(i32* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8i32.nxv2i64( %1, %1, i32* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv32i8.nxv16i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv32i8.nxv16i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv32i8_nxv16i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv32i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv32i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv32i8_nxv16i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv32i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv32i8.nxv16i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv32i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv32i8.nxv32i16(i8*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i64) + +define @test_vlxseg2_nxv32i8_nxv32i16(i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv32i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv32i8_nxv32i16(i8* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv32i8.nxv32i16(i8* %base, %index, i64 %vl) + %1 = 
extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
+  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %1, <vscale x 32 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
+  ret <vscale x 32 x i8> %3
+}
+
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlxseg2.nxv32i8.nxv4i32(i8*, <vscale x 4 x i32>, i64)
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlxseg2.mask.nxv32i8.nxv4i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 4 x i32>, <vscale x 32 x i1>, i64)
+
+define <vscale x 32 x i8> @test_vlxseg2_nxv32i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv32i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlxseg2.nxv32i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
+  ret <vscale x 32 x i8> %1
+}
+
+define <vscale x 32 x i8> @test_vlxseg2_mask_nxv32i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 32 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv32i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlxseg2.nxv32i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
+  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlxseg2.mask.nxv32i8.nxv4i32(<vscale x 32 x i8> %1, <vscale x 32 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 32 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
+  ret <vscale x 32 x i8> %3
+}
+
[The same unmasked/masked test pair is repeated for @llvm.riscv.vlxseg2.nxv32i8 with index types nxv16i8, nxv1i64, nxv1i32, nxv8i16, nxv4i8, nxv1i16, nxv2i32, nxv8i8, nxv4i64, nxv64i8, nxv4i16, nxv8i64, nxv1i8, nxv2i8, nxv8i32, nxv32i8, nxv16i32, nxv2i16, and nxv2i64; within this group only the instruction mnemonic varies, as vlxseg2eiN.v, where N is the index element width in bits.]
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg2.nxv2i16.nxv16i16(i16*, <vscale x 16 x i16>, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg2.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlxseg2_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg2.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlxseg2_mask_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg2.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg2.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %1, <vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
[The same unmasked/masked test pair is repeated for @llvm.riscv.vlxseg2.nxv2i16 with index types nxv32i16, nxv4i32, nxv16i8, nxv1i64, nxv1i32, nxv8i16, nxv4i8, nxv1i16, nxv2i32, nxv8i8, nxv4i64, nxv64i8, nxv4i16, nxv8i64, nxv1i8, nxv2i8, nxv8i32, nxv32i8, nxv16i32, nxv2i16, and nxv2i64; again only the vlxseg2eiN.v mnemonic follows the index element width.]
test_vlxseg2_mask_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i16.nxv2i64( %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv16i16(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv16i16(,,, i16*, , , i64) + +define @test_vlxseg3_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv16i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv32i16(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv32i16(,,, i16*, , , i64) + +define @test_vlxseg3_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv32i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv4i32(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv4i32(,,, i16*, , , i64) + +define @test_vlxseg3_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv4i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv16i8(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv16i8(,,, i16*, , , i64) + +define @test_vlxseg3_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv16i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv1i64(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv1i64(,,, i16*, , , i64) + +define @test_vlxseg3_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + 
%2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv1i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv1i32(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv1i32(,,, i16*, , , i64) + +define @test_vlxseg3_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv1i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv8i16(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv8i16(,,, i16*, , , i64) + +define @test_vlxseg3_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv8i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv4i8(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv4i8(,,, i16*, , , i64) + +define @test_vlxseg3_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv4i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv1i16(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv1i16(,,, i16*, , , i64) + +define @test_vlxseg3_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv1i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv2i32(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i64) + +define @test_vlxseg3_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv2i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv8i8(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv8i8(,,, i16*, , , i64) + +define @test_vlxseg3_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlxseg3_mask_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv4i64(i16*, <vscale x 4 x i64>, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlxseg3_nxv2i16_nxv4i64(i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv4i64(i16* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlxseg3_mask_nxv2i16_nxv4i64(i16* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv4i64(i16* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv4i64(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv64i8(i16*, <vscale x 64 x i8>, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlxseg3_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlxseg3_mask_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv4i16(i16*, <vscale x 4 x i16>, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlxseg3_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlxseg3_mask_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv8i64(i16*, <vscale x 8 x i64>, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlxseg3_nxv2i16_nxv8i64(i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv8i64(i16* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlxseg3_mask_nxv2i16_nxv8i64(i16* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv8i64(i16* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv8i64(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv1i8(i16*, <vscale x 1 x i8>, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlxseg3_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlxseg3_mask_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv2i8(i16*, <vscale x 2 x i8>, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlxseg3_nxv2i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlxseg3_mask_nxv2i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv8i32(i16*, <vscale x 8 x i32>, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlxseg3_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlxseg3_mask_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.nxv2i16.nxv32i8(i16*, <vscale x 32 x i8>, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlxseg3_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:
vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv32i8( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv16i32(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv16i32(,,, i16*, , , i64) + +define @test_vlxseg3_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv16i32( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv2i16(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i64) + +define @test_vlxseg3_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} 
@llvm.riscv.vlxseg3.mask.nxv2i16.nxv2i16( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv2i64(i16*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv2i64(,,, i16*, , , i64) + +define @test_vlxseg3_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i16.nxv2i64( %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv16i16(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv16i16(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv32i16(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv32i16(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg4_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv4i32(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv4i32(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv16i8(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv16i8(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv1i64(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv1i64(,,,, 
i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv1i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv1i32(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv1i32(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv8i16(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv8i16(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: 
vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv4i8(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv4i8(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv1i16(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv1i16(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv2i32(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, 
(a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv8i8(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv8i8(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv4i64(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv4i64(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv4i64(i16* %base, %index, i64 
%vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv4i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv64i8(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv64i8(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv4i16(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv4i16(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv8i64(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv8i64(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + 
+define @test_vlxseg4_mask_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv8i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv1i8(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv1i8(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv2i8(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} 
@llvm.riscv.vlxseg4.nxv2i16.nxv8i32(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv8i32(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv32i8(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv32i8(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv16i32(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv16i32(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; 
CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv2i16(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv2i64(i16*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv2i64(,,,, i16*, , , i64) + +define @test_vlxseg4_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i16.nxv2i64( %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv16i16(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv16i16(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vlxseg5_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv32i16(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv32i16(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv4i32(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv4i32(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv16i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv16i8(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv1i64(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv1i64(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv1i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv1i32(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv1i32(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl) 
{ +; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv8i16(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv8i16(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv4i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv4i8(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv1i16(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv1i16(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv2i32(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv8i8(i16*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv8i8(,,,,, i16*, , , i64) + +define @test_vlxseg5_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl) { 
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv8i8(i16*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv8i8(,,,,, i16*, , , i64)
+
+define @test_vlxseg5_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg5_mask_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 0
+  %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv4i64(i16*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv4i64(,,,,, i16*, , , i64)
+
+define @test_vlxseg5_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg5_mask_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 0
+  %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv4i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv64i8(i16*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv64i8(,,,,, i16*, , , i64)
+
+define @test_vlxseg5_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg5_mask_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 0
+  %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv4i16(i16*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv4i16(,,,,, i16*, , , i64)
+
+define @test_vlxseg5_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg5_mask_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 0
+  %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv8i64(i16*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv8i64(,,,,, i16*, , , i64)
+
+define @test_vlxseg5_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg5_mask_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 0
+  %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv8i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv1i8(i16*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv1i8(,,,,, i16*, , , i64)
+
+define @test_vlxseg5_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg5_mask_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 0
+  %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv2i8(i16*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i64)
+
+define @test_vlxseg5_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg5_mask_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 0
+  %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv8i32(i16*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv8i32(,,,,, i16*, , , i64)
+
+define @test_vlxseg5_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg5_mask_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 0
+  %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv32i8(i16*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv32i8(,,,,, i16*, , , i64)
+
+define @test_vlxseg5_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg5_mask_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 0
+  %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv16i32(i16*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv16i32(,,,,, i16*, , , i64)
+
+define @test_vlxseg5_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg5_mask_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 0
+  %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv2i16(i16*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i64)
+
+define @test_vlxseg5_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg5_mask_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 0
+  %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv2i64(i16*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv2i64(,,,,, i16*, , , i64)
+
+define @test_vlxseg5_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg5_mask_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,} %0, 0
+  %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2i16.nxv2i64( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,} %2, 1
+  ret %3
+}
+
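+; NOTE (editorial, not from the original patch): the vlxseg6 tests below
+; repeat the vlxseg5 pattern with six-element result tuples, so the masked
+; variants copy one extra register (vmv1r.v v6) and the kill comment names
+; the six-register tuple $v15_v16_v17_v18_v19_v20.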
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv16i16(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv16i16(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv32i16(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv32i16(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv4i32(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv4i32(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv16i8(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv16i8(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv1i64(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv1i64(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv1i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv1i32(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv1i32(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv8i16(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv8i16(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv4i8(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv4i8(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv1i16(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv1i16(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv2i32(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv8i8(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv8i8(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv4i64(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv4i64(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv4i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv64i8(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv64i8(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv4i16(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv4i16(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv8i64(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv8i64(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv8i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv1i8(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv1i8(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv2i8(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv8i32(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv8i32(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv32i8(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv32i8(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv16i32(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv16i32(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv2i16(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv2i64(i16*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv2i64(,,,,,, i16*, , , i64)
+
+define @test_vlxseg6_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg6_mask_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,} %0, 0
+  %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2i16.nxv2i64( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,} %2, 1
+  ret %3
+}
+
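+; NOTE (editorial, not from the original patch): the vlxseg7 tests extend
+; the same pattern to seven-element tuples (maskedoff copies up to
+; vmv1r.v v7, kill list $v15_v16_v17_v18_v19_v20_v21).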
+declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv16i16(i16*, , i64)
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv16i16(,,,,,,, i16*, , , i64)
+
+define @test_vlxseg7_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg7_mask_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 0
+  %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv32i16(i16*, , i64)
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv32i16(,,,,,,, i16*, , , i64)
+
+define @test_vlxseg7_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg7_mask_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 0
+  %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv4i32(i16*, , i64)
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv4i32(,,,,,,, i16*, , , i64)
+
+define @test_vlxseg7_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg7_mask_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 0
+  %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv16i8(i16*, , i64)
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv16i8(,,,,,,, i16*, , , i64)
+
+define @test_vlxseg7_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg7_mask_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 0
+  %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv1i64(i16*, , i64)
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv1i64(,,,,,,, i16*, , , i64)
+
+define @test_vlxseg7_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg7_mask_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 0
+  %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv1i32(i16*, , i64)
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv1i32(,,,,,,, i16*, , , i64)
+
+define @test_vlxseg7_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg7_mask_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 0
+  %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv8i16(i16*, , i64)
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv8i16(,,,,,,, i16*, , , i64)
+
+define @test_vlxseg7_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg7_mask_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 0
+  %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv4i8(i16*, , i64)
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv4i8(,,,,,,, i16*, , , i64)
+
+define @test_vlxseg7_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg7_mask_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 0
+  %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv1i16(i16*, , i64)
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv1i16(,,,,,,, i16*, , , i64)
+
+define @test_vlxseg7_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg7_mask_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 0
+  %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl)
+  %3 = extractvalue {,,,,,,} %2, 1
+  ret %3
+}
+
+declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv2i32(i16*, , i64)
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i64)
+
+define @test_vlxseg7_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl)
+  %1 = extractvalue {,,,,,,} %0, 1
+  ret %1
+}
+
+define @test_vlxseg7_mask_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:
vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv8i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv8i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv4i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv4i64(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} 
@llvm.riscv.vlxseg7.nxv2i16.nxv64i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv64i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv4i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv4i16(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv8i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv8i64(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} 
@llvm.riscv.vlxseg7.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv1i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv1i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv2i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv8i32(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv8i32(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv32i8(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv32i8(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv16i32(i16*, , i64) 
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv16i32(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv2i16(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv2i64(i16*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv2i64(,,,,,,, i16*, , , i64) + +define @test_vlxseg7_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv2i64(i16* %base, %index, i64 
%vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2i16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv16i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv16i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv16i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv16i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv32i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv32i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv32i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, 
v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv32i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv4i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv4i32(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv4i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv4i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv16i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv16i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv16i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv16i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, 
%1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv1i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv1i64(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv1i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv1i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv1i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv1i32(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv1i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv1i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv8i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv8i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, 
a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv8i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv8i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv4i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv4i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv4i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv4i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv1i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv1i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv1i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg8_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv1i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv2i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv2i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv8i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv8i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv8i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v 
v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv8i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv4i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv4i64(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv4i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv4i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv64i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv64i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv64i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv64i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} 
@llvm.riscv.vlxseg8.nxv2i16.nxv4i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv4i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv4i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv4i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv8i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv8i64(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv8i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv8i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv1i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv1i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv1i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv1i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv2i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv2i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv8i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv8i32(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv8i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), 
v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv8i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv32i8(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv32i8(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv32i8(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv32i8(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv16i32(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv16i32(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv16i32(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} 
@llvm.riscv.vlxseg8.nxv2i16.nxv16i32(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv2i16(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv2i16(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv2i64(i16*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv2i64(,,,,,,,, i16*, , , i64) + +define @test_vlxseg8_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2i16.nxv2i64(i16* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2i16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv16i16(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv16i16(,, i64*, , , i64) 
+ +define @test_vlxseg2_nxv2i64_nxv16i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv16i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv16i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv16i16( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv32i16(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv32i16(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv32i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv32i16( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv4i32(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv4i32(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv4i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv4i32(i64* %base, %index, 
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
+  %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv4i32(<vscale x 2 x i64> %1, <vscale x 2 x i64> %1, i64* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
+  ret <vscale x 2 x i64> %3
+}
+
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlxseg2.nxv2i64.nxv16i8(i64*, <vscale x 16 x i8>, i64)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv16i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i64> @test_vlxseg2_nxv2i64_nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlxseg2.nxv2i64.nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
+  ret <vscale x 2 x i64> %1
+}
+
+define <vscale x 2 x i64> @test_vlxseg2_mask_nxv2i64_nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlxseg2.nxv2i64.nxv16i8(i64* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
+  %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv16i8(<vscale x 2 x i64> %1, <vscale x 2 x i64> %1, i64* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
+  ret <vscale x 2 x i64> %3
+}
+
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlxseg2.nxv2i64.nxv1i64(i64*, <vscale x 1 x i64>, i64)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv1i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i64> @test_vlxseg2_nxv2i64_nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlxseg2.nxv2i64.nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
+  ret <vscale x 2 x i64> %1
+}
+
+define <vscale x 2 x i64> @test_vlxseg2_mask_nxv2i64_nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlxseg2.nxv2i64.nxv1i64(i64* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
+  %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv1i64(<vscale x 2 x i64> %1, <vscale x 2 x i64> %1, i64* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
+  ret <vscale x 2 x i64> %3
+}
+
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlxseg2.nxv2i64.nxv1i32(i64*, <vscale x 1 x i32>, i64)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv1i32(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i64> @test_vlxseg2_nxv2i64_nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlxseg2.nxv2i64.nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
+  ret <vscale x 2 x i64> %1
+}
+
+define <vscale x 2 x i64> @test_vlxseg2_mask_nxv2i64_nxv1i32(i64* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1,
e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv1i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv1i32( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv8i16(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv8i16(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv8i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv8i16( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv4i8(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv4i8(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv4i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv4i8( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv1i16(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv1i16(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv1i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} 
@llvm.riscv.vlxseg2.nxv2i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv1i16( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv2i32(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv2i32(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv2i32( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv8i8(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv8i8(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv8i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv8i8( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv4i64(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv4i64(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv4i64(i64* %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv4i64( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv64i8(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv64i8(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv64i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv64i8( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv4i16(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv4i16(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv4i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} 
@llvm.riscv.vlxseg2.mask.nxv2i64.nxv4i16( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv8i64(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv8i64(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv8i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv8i64( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv1i8(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv1i8(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv1i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv1i8( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv2i8(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv2i8(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; 
CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv2i8( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv8i32(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv8i32(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv8i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv8i32( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv32i8(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv32i8(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv32i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv32i8( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv16i32(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv16i32(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv16i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = 
extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv16i32( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv2i16(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv2i16(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv2i16( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2i64.nxv2i64(i64*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv2i64(,, i64*, , , i64) + +define @test_vlxseg2_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2i64.nxv2i64( %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv16i16(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv16i16(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv16i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vlxseg3_nxv2i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv16i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv16i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv16i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv32i16(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv32i16(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv32i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv32i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv4i32(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv4i32(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv4i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} 
@llvm.riscv.vlxseg3.nxv2i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv4i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv16i8(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv16i8(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv16i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv16i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv16i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv16i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv1i64(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv1i64(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv1i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv1i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv1i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv1i64( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv1i32(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv1i32(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv1i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv1i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg3_mask_nxv2i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv1i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv1i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv8i16(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv8i16(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv8i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv8i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv4i8(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv4i8(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv4i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv4i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv1i16(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv1i16(,,, i64*, , , i64) + +define 
@test_vlxseg3_nxv2i64_nxv1i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv1i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv2i32(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv2i32(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv2i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv8i8(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv8i8(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv8i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv8i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv4i64(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv4i64(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv4i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv4i64( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv64i8(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv64i8(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv64i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv64i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv4i16(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv4i16(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv4i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 
+ ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv4i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv8i64(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv8i64(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv8i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv8i64( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv1i8(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv1i8(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv1i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv1i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv2i8(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv2i8(,,, i64*, , , i64) + +define 
@test_vlxseg3_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv2i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv8i32(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv8i32(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv8i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv8i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv32i8(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv32i8(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv32i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv32i8( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv16i32(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv16i32(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv16i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv16i32( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv2i16(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv2i16(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv2i16( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv2i64(i64*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv2i64(,,, i64*, , , i64) + +define @test_vlxseg3_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = 
extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2i64.nxv2i64( %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv16i16(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv16i16(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv16i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv16i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv16i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv16i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv16i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv32i16(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv32i16(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv32i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv32i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv32i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv32i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 
+} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv4i32(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv4i32(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv4i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv4i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv4i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv4i32( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv16i8(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv16i8(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv16i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv16i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv16i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv16i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv16i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv1i64(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv1i64(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv1i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv1i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv1i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv1i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv1i64( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv1i32(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv1i32(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv1i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv1i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv1i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv1i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv1i32( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv8i16(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv8i16(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv8i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv8i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv8i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv8i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv4i8(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv4i8(,,,, i64*, , , i64) + +define 
@test_vlxseg4_nxv2i64_nxv4i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv4i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv4i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv4i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv1i16(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv1i16(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv1i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv1i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv1i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv1i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv2i32(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv2i32(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v 
v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv2i32( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv8i8(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv8i8(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv8i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv8i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv8i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv8i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv4i64(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv4i64(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv4i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv4i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv4i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv4i64( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv64i8(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv64i8(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv64i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: 
vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv64i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv64i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv64i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv4i16(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv4i16(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv4i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv4i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv4i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv4i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv8i64(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv8i64(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv8i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv8i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} 
@llvm.riscv.vlxseg4.nxv2i64.nxv8i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv8i64( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv1i8(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv1i8(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv1i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv1i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv1i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv1i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv2i8(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv2i8(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv2i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv8i32(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv8i32(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv8i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv8i32(i64* 
%base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv8i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv8i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv8i32( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv32i8(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv32i8(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv32i8(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv32i8(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv32i8(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv32i8( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv16i32(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv16i32(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv16i32(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv16i32(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv16i32(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv16i32( %1, %1, %1, %1, i64* %base, %index, 
%mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv2i16(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv2i16(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv2i16( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv2i64(i64*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv2i64(,,,, i64*, , , i64) + +define @test_vlxseg4_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2i64.nxv2i64( %1, %1, %1, %1, i64* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv16i16(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv16i16(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv16i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg2_mask_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv16i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv32i16(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv32i16(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv32i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv32i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv4i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv4i32(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv4i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv16i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; 
CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv16i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv1i64(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv1i64(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv1i64( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv1i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv1i32(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv1i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} 
%2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv8i16(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv8i16(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv8i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv4i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv4i8(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv4i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv1i16(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv1i16(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: 
vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv1i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv2i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv2i32(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv2i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv8i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv8i8(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv8i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv4i64(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv4i64(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 
+} + +define @test_vlxseg2_mask_nxv16f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv4i64( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv64i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv64i8(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv64i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv4i16(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv4i16(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv4i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv8i64(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv8i64(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vlxseg2_nxv16f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv8i64( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv1i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv1i8(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv1i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv2i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv2i8(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} 
@llvm.riscv.vlxseg2.mask.nxv16f16.nxv2i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv8i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv8i32(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv8i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv32i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv32i8(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv32i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv16i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; 
CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv16i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv2i16(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv2i16(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv2i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv16f16.nxv2i64(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv2i64(,, half*, , , i64) + +define @test_vlxseg2_nxv16f16_nxv2i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv16f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv16f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv16f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv16f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv16f16.nxv2i64( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f64.nxv16i16(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv16i16(,, double*, , , i64) + +define @test_vlxseg2_nxv4f64_nxv16i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; 
CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+  ret <vscale x 4 x double> %1
+}
+
+define <vscale x 4 x double> @test_vlxseg2_mask_nxv4f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
+  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv16i16(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
+  ret <vscale x 4 x double> %3
+}
+
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv32i16(double*, <vscale x 32 x i16>, i64)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv32i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x double> @test_vlxseg2_nxv4f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+  ret <vscale x 4 x double> %1
+}
+
+define <vscale x 4 x double> @test_vlxseg2_mask_nxv4f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
+  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv32i16(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
+  ret <vscale x 4 x double> %3
+}
+
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv4i32(double*, <vscale x 4 x i32>, i64)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x double> @test_vlxseg2_nxv4f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+  ret <vscale x 4 x double> %1
+}
+
+define <vscale x 4 x double> @test_vlxseg2_mask_nxv4f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
+  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
+  ret <vscale x 4 x double> %3
+}
+
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv16i8(double*, <vscale x 16 x i8>, i64)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv16i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x double> @test_vlxseg2_nxv4f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+  ret <vscale x 4 x double> %1
+}
+
+define <vscale x 4 x double> @test_vlxseg2_mask_nxv4f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
+  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv16i8(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
+  ret <vscale x 4 x double> %3
+}
+
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv1i64(double*, <vscale x 1 x i64>, i64)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv1i64(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x double> @test_vlxseg2_nxv4f64_nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+  ret <vscale x 4 x double> %1
+}
+
+define <vscale x 4 x double> @test_vlxseg2_mask_nxv4f64_nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
+  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv1i64(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
+  ret <vscale x 4 x double> %3
+}
+
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv1i32(double*, <vscale x 1 x i32>, i64)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv1i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x double> @test_vlxseg2_nxv4f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+  ret <vscale x 4 x double> %1
+}
+
+define <vscale x 4 x double> @test_vlxseg2_mask_nxv4f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
+  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv1i32(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
+  ret <vscale x 4 x double> %3
+}
+
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv8i16(double*, <vscale x 8 x i16>, i64)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv8i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x double> @test_vlxseg2_nxv4f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+  ret <vscale x 4 x double> %1
+}
+
+define <vscale x 4 x double> @test_vlxseg2_mask_nxv4f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
+  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv8i16(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
+  ret <vscale x 4 x double> %3
+}
+
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv4i8(double*, <vscale x 4 x i8>, i64)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x double> @test_vlxseg2_nxv4f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+  ret <vscale x 4 x double> %1
+}
+
+define <vscale x 4 x double> @test_vlxseg2_mask_nxv4f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16
+; CHECK-NEXT:    vmv4r.v v8, v4
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
+  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
+  ret <vscale x 4 x double> %3
+}
+
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv1i16(double*, <vscale x 1 x i16>, i64)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv1i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x double> @test_vlxseg2_nxv4f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v12, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlxseg2.nxv4f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+  ret <vscale x 4 x double> %1
+}
+
+define <vscale x 4 x double> 
@test_vlxseg2_mask_nxv4f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv1i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv1i16( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f64.nxv2i32(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv2i32(,, double*, , , i64) + +define @test_vlxseg2_nxv4f64_nxv2i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv2i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv2i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv2i32( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f64.nxv8i8(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv8i8(,, double*, , , i64) + +define @test_vlxseg2_nxv4f64_nxv8i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv8i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv8i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv8i8( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f64.nxv4i64(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv4i64(,, double*, , , i64) + +define @test_vlxseg2_nxv4f64_nxv4i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vlxseg2_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv4i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv4i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv4i64( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f64.nxv64i8(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv64i8(,, double*, , , i64) + +define @test_vlxseg2_nxv4f64_nxv64i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv64i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv64i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv64i8( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f64.nxv4i16(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i64) + +define @test_vlxseg2_nxv4f64_nxv4i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv4i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv4i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call 
{,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv4i16( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f64.nxv8i64(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv8i64(,, double*, , , i64) + +define @test_vlxseg2_nxv4f64_nxv8i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv8i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv8i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv8i64( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f64.nxv1i8(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv1i8(,, double*, , , i64) + +define @test_vlxseg2_nxv4f64_nxv1i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv1i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv1i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv1i8( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f64.nxv2i8(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv2i8(,, double*, , , i64) + +define @test_vlxseg2_nxv4f64_nxv2i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv2i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; 
CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv2i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv2i8( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f64.nxv8i32(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv8i32(,, double*, , , i64) + +define @test_vlxseg2_nxv4f64_nxv8i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv8i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv8i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv8i32( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f64.nxv32i8(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv32i8(,, double*, , , i64) + +define @test_vlxseg2_nxv4f64_nxv32i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv32i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv32i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv32i8( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f64.nxv16i32(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv16i32(,, double*, , , i64) + +define @test_vlxseg2_nxv4f64_nxv16i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv16i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv16i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv16i32( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f64.nxv2i16(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv2i16(,, double*, , , i64) + +define @test_vlxseg2_nxv4f64_nxv2i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv2i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv2i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv2i16( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f64.nxv2i64(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv2i64(,, double*, , , i64) + +define @test_vlxseg2_nxv4f64_nxv2i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv2i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f64.nxv2i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f64.nxv2i64( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv16i16(double*, , i64) 
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv16i16(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv16i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv16i16( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv32i16(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv32i16(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv32i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv32i16( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv4i32(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv4i32(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv4i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv4i32( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv16i8(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv16i8(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv16i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv16i8( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv1i64(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv1i64(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv1i64( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv1i32(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv1i32(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv1i32(double* %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv1i32( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv8i16(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv8i16(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv8i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv8i16( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv4i8(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv4i8(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv4i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv4i8( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv1i16(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv1i16( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv2i32(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv2i32(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv2i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv2i32( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv8i8(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv8i8(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv8i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv8i8( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 
+ ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv4i64(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv4i64(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv4i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv4i64( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv64i8(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv64i8(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv64i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv64i8( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv4i16(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv4i16(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv4i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: 
vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv4i16( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv8i64(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv8i64(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv8i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv8i64( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv1i8(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv1i8(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv1i8( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv2i8(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv2i8(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv2i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define 
@test_vlxseg2_mask_nxv1f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv2i8( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv8i32(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv8i32(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv8i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv8i32( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv32i8(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv32i8(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv32i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv32i8( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv16i32(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv16i32(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv16i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv16i32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv16i32( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv2i16(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv2i16(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv2i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv2i16( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f64.nxv2i64(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv2i64(,, double*, , , i64) + +define @test_vlxseg2_nxv1f64_nxv2i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f64.nxv2i64( %1, 
%1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv16i16(double*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv16i16(,,, double*, , , i64) + +define @test_vlxseg3_nxv1f64_nxv16i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv16i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv32i16(double*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv32i16(,,, double*, , , i64) + +define @test_vlxseg3_nxv1f64_nxv32i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv32i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv4i32(double*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv4i32(,,, double*, , , i64) + +define @test_vlxseg3_nxv1f64_nxv4i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv4i32: +; CHECK: # 
%bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv4i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv4i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv16i8(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv16i8(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv16i8(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv16i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv16i8(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv16i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv16i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv1i64(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv1i64(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv1i64(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv1i64(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv1i64(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv1i64( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv1i32(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv1i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv1i32(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv1i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv1i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv8i16(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv8i16(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv8i16(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv8i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv8i16(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv8i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv8i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv4i8(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv4i8(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv4i8(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv4i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv4i8(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv4i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv4i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv1i16(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv1i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv1i16(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv1i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv1i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv2i32(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv2i32(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv2i32(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv2i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv2i32(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv2i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv2i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv8i8(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv8i8(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv8i8(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv8i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv8i8(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv8i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv8i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv4i64(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv4i64(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv4i64(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv4i64(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv4i64(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv4i64(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv4i64( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv64i8(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv64i8(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv64i8(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv64i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv64i8(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv64i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv64i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv4i16(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv4i16(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv4i16(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv4i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv4i16(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv4i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv4i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv8i64(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv8i64(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv8i64(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv8i64(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv8i64(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv8i64(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv8i64( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv1i8(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv1i8(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv1i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv1i8(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv1i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv1i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv2i8(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv2i8(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv2i8(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv2i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv2i8(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv2i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv2i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv8i32(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv8i32(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv8i32(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv8i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv8i32(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv8i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv8i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv32i8(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv32i8(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv32i8(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv32i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv32i8(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv32i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv32i8( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv16i32(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv16i32(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv16i32(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv16i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv16i32(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv16i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv16i32( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv2i16(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv2i16(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv2i16(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv2i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv2i16(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv2i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv2i16( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv2i64(double*, , i64)
+declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv2i64(,,, double*, , , i64)
+
+define @test_vlxseg3_nxv1f64_nxv2i64(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv2i64(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg3_mask_nxv1f64_nxv2i64(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f64.nxv2i64(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,} %0, 0
+ %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f64.nxv2i64( %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv16i16(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv16i16(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv16i16(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv16i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv16i16(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv16i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv32i16(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv32i16(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv32i16(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv32i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv32i16(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv32i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv4i32(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv4i32(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv4i32(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv4i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv4i32(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv4i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv16i8(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv16i8(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv16i8(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv16i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv16i8(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv16i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv1i64(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv1i64(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv1i64(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv1i64(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv1i64(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv1i64( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv1i32(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv1i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv1i32(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv1i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv8i16(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv8i16(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv8i16(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv8i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv8i16(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv8i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv4i8(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv4i8(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv4i8(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv4i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv4i8(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv4i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv4i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv1i16(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv1i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv1i16(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv1i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv1i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv2i32(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv2i32(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv2i32(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv2i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv2i32(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv2i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv2i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv8i8(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv8i8(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv8i8(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv8i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv8i8(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv8i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv8i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv4i64(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv4i64(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv4i64(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv4i64(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv4i64(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv4i64(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv4i64( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv64i8(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv64i8(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv64i8(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv64i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv64i8(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv64i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv64i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv4i16(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv4i16(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv4i16(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv4i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv4i16(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv4i16(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv4i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv8i64(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv8i64(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv8i64(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv8i64(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv8i64(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv8i64(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv8i64( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv1i8(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv1i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv1i8(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv1i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv2i8(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv2i8(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv2i8(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv2i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv2i8(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv2i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv2i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv8i32(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv8i32(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv8i32(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv8i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv8i32(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv8i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv8i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv32i8(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv32i8(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv32i8(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv32i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg4_mask_nxv1f64_nxv32i8(double* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv32i8(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 0
+ %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv32i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,} %2, 1
+ ret %3
+}
+
+declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv16i32(double*, , i64)
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv16i32(,,,, double*, , , i64)
+
+define @test_vlxseg4_nxv1f64_nxv16i32(double* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv16i32(double* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
+}
+ +define @test_vlxseg4_mask_nxv1f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv16i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv2i16(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv2i16(,,,, double*, , , i64) + +define @test_vlxseg4_nxv1f64_nxv2i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv2i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv2i64(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv2i64(,,,, double*, , , i64) + +define @test_vlxseg4_nxv1f64_nxv2i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f64.nxv2i64( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} 
%2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv16i16(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv16i16(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv16i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv32i16(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv32i16(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv32i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv4i32(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv4i32(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv4i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg5_mask_nxv1f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv16i8(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv16i8(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv16i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv1i64(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv1i64(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} 
@llvm.riscv.vlxseg5.mask.nxv1f64.nxv1i64( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv1i32(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f64_nxv1i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv8i16(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv8i16(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv8i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv4i8(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv4i8(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv4i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} 
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg5_mask_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv1i16(double*, <vscale x 1 x i16>, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlxseg5_nxv1f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg5_mask_nxv1f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv2i32(double*, <vscale x 2 x i32>, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlxseg5_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg5_mask_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv8i8(double*, <vscale x 8 x i8>, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlxseg5_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg5_mask_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv4i64(double*, <vscale x 4 x i64>, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlxseg5_nxv1f64_nxv4i64(double* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv4i64(double* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg5_mask_nxv1f64_nxv4i64(double* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv4i64(double* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv4i64(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv64i8(double*, <vscale x 64 x i8>, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlxseg5_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg5_mask_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv4i16(double*, <vscale x 4 x i16>, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlxseg5_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg5_mask_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv8i64(double*, <vscale x 8 x i64>, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlxseg5_nxv1f64_nxv8i64(double* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg5.nxv1f64.nxv8i64(double* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg5_mask_nxv1f64_nxv8i64(double* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv8i64( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv1i8(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv2i8(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv2i8(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv2i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv2i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv8i32(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv8i32(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv8i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu 
+; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv8i32( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv32i8(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv32i8(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv32i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv32i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv16i32(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv16i32(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv16i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: 
vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv16i32( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv2i16(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv2i16(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv2i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv2i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv2i64(double*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv2i64(,,,,, double*, , , i64) + +define @test_vlxseg5_nxv1f64_nxv2i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f64.nxv2i64( %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv16i16(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv16i16(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv16i16(double* %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vlxseg6_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv32i16(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv32i16(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv32i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv4i32(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv4i32(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv4i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv16i8(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv16i8(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv16i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv1i64(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv1i64(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} 
@llvm.riscv.vlxseg6.mask.nxv1f64.nxv1i64( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv1i32(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv1i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv8i16(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv8i16(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv8i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv4i8(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv4i8(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv4i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed 
$v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv4i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv1i16(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv1i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv2i32(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv2i32(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv2i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; 
CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv2i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv8i8(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv8i8(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv8i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv8i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv4i64(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv4i64(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv4i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv4i64( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv64i8(double*, , i64) +declare {,,,,,} 
@llvm.riscv.vlxseg6.mask.nxv1f64.nxv64i8(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv64i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv64i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv4i16(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv4i16(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv4i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv4i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv8i64(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv8i64(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv8i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg6_mask_nxv1f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv8i64( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv1i8(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv2i8(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv2i8(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv2i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} 
@llvm.riscv.vlxseg6.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv2i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv8i32(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv8i32(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv8i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv8i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv32i8(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv32i8(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv32i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv32i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv16i32(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv16i32(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv16i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv16i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv2i16(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv2i16(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv2i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv2i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv2i64(double*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv2i64(,,,,,, double*, , , i64) + +define @test_vlxseg6_nxv1f64_nxv2i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f64_nxv2i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, 
(a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f64.nxv2i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f64.nxv2i64( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv16i16(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv16i16(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv16i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv32i16(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv32i16(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv32i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} 
@llvm.riscv.vlxseg7.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv4i32(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv4i32(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv4i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv16i8(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv16i8(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv16i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv1i64(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv1i64(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv1i64( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv1i32(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv1i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv8i16(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv8i16(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv8i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv4i8(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv4i8(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv4i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv4i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv1i16(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv1i16(double* %base, 
%index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv1i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv2i32(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv2i32(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv2i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv2i32( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv8i8(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv8i8(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv8i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv8i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv4i64(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv4i64(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv4i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vlxseg7_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv4i64( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv64i8(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv64i8(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv64i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv64i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv4i16(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv4i16(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv4i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { +; 
CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv4i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv8i64(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv8i64(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv8i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv8i64( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv1i8(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: 
+ %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv2i8(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv2i8(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv2i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv2i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv8i32(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv8i32(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv8i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv8i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv8i32( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv32i8(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv32i8(,,,,,,, double*, , , i64) + +define 
@test_vlxseg7_nxv1f64_nxv32i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv32i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv32i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv16i32(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv16i32(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv16i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv16i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv16i32( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv2i16(double*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv2i16(,,,,,,, double*, , , i64) + +define @test_vlxseg7_nxv1f64_nxv2i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f64.nxv2i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + 
+define <vscale x 1 x double> @test_vlxseg7_mask_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg7.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg7.nxv1f64.nxv2i64(double*, <vscale x 2 x i64>, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlxseg7_nxv1f64_nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv1f64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg7ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg7.nxv1f64.nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg7_mask_nxv1f64_nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv1f64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg7ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg7ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg7.nxv1f64.nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg7.mask.nxv1f64.nxv2i64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv16i16(double*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlxseg8_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg8_mask_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+;
CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv16i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv32i16(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv32i16(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv32i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv32i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv4i32(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv4i32(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv4i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv4i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, 
double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv16i8(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv16i8(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv16i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f64_nxv16i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv16i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv1i64(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv1i64(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f64_nxv1i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv1i32(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f64_nxv1i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv8i16(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv8i16(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv8i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv8i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv4i8(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv4i8(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv4i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg8_mask_nxv1f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv4i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv1i16(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv2i32(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv2i32(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv2i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; 
CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv2i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv8i8(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv8i8(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv8i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv8i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv4i64(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv4i64(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv4i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv4i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} 
@llvm.riscv.vlxseg8.mask.nxv1f64.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv64i8(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv64i8(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv64i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv64i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv4i16(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv4i16(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv4i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv4i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv8i64(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv8i64(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv8i64(double* %base, %index, i64 %vl) 
{ +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv8i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv1i8(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv2i8(double*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv2i8(,,,,,,,, double*, , , i64) + +define @test_vlxseg8_nxv1f64_nxv2i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f64.nxv2i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + 
ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg8_mask_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv8i32(double*, <vscale x 8 x i32>, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlxseg8_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg8_mask_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv32i8(double*, <vscale x 32 x i8>, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlxseg8_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg8_mask_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv16i32(double*, <vscale x 16 x i32>, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlxseg8_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg8_mask_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv2i16(double*, <vscale x 2 x i16>, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlxseg8_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg8_mask_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv2i64(double*, <vscale x 2 x i64>, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlxseg8_nxv1f64_nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv1f64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlxseg8_mask_nxv1f64_nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv1f64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.nxv1f64.nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlxseg8.mask.nxv1f64.nxv2i64(<vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, <vscale x 1 x double> %1, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.nxv2f32.nxv16i16(float*, <vscale x 16 x i16>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg2_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg2_mask_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.nxv2f32.nxv32i16(float*, <vscale x 32 x i16>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg2_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg2_mask_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.nxv2f32.nxv4i32(float*, <vscale x 4 x i32>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg2_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg2_mask_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.nxv2f32.nxv16i8(float*, <vscale x 16 x i8>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg2_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg2_mask_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.nxv2f32.nxv1i64(float*, <vscale x 1 x i64>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> 
@test_vlxseg2_nxv2f32_nxv1i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv1i64( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv1i32(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv1i32(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv1i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv1i32( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv8i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv8i16(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv8i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = 
extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv8i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv4i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv4i8(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv4i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv4i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv1i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv1i16(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv1i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv1i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv2i32(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; 
CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv2i32( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv8i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv8i8(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv8i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv8i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv4i64(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv4i64(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv4i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv4i64( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv64i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv64i8(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv64i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} 
@llvm.riscv.vlxseg2.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv64i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv4i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv4i16(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv4i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv4i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv8i64(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv8i64(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv8i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv8i64( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv1i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv1i8(,, float*, , , i64) + +define 
@test_vlxseg2_nxv2f32_nxv1i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv1i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv2i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv2i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv8i32(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv8i32(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv8i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 
0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv8i32( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv32i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv32i8(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv32i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv32i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv16i32(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv16i32(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv16i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv16i32( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv2i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; 
CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv2i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f32.nxv2i64(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv2i64(,, float*, , , i64) + +define @test_vlxseg2_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f32.nxv2i64( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv16i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv16i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv16i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv16i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv32i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv32i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv32i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed 
$v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv32i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv4i32(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv4i32(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv4i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv4i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv16i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv16i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv16i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv16i8( %1, %1, %1, float* %base, %index, %mask, 
i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv1i64(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv1i64(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv1i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv1i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv1i32(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv1i32(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv1i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv1i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv8i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv8i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv8i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; 
CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv8i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv4i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv4i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv4i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv4i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv1i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv1i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv1i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv1i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv2i32(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: 
vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv2i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv8i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv8i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv8i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv8i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv4i64(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv4i64(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv4i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} 
@llvm.riscv.vlxseg3.mask.nxv2f32.nxv4i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv64i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv64i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv64i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv64i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv4i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv4i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv4i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv4i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv8i64(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv8i64(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv8i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv8i64: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv8i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv1i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv1i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv1i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv1i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv2i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv2i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv8i32(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv8i32(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv8i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv8i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv32i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv32i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv32i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv32i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv16i32(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv16i32(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv16i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = 
extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv16i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv2i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv2i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv2i64(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv2i64(,,, float*, , , i64) + +define @test_vlxseg3_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f32.nxv2i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f32.nxv16i16(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f32.nxv16i16(,,,, float*, , , i64) + +define @test_vlxseg4_nxv2f32_nxv16i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f32_nxv16i16(float* %base, %index, i64 %vl, 
+define <vscale x 2 x float> @test_vlxseg4_mask_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg4.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg4.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+; The same unmasked/masked test pair is generated for vlxseg4 of nxv2f32 with
+; each of the other supported index types: nxv32i16, nxv4i32, nxv16i8,
+; nxv1i64, nxv1i32, nxv8i16, nxv4i8, nxv1i16, nxv2i32, nxv8i8, nxv4i64,
+; nxv64i8, nxv4i16, nxv8i64, nxv1i8, nxv2i8, nxv8i32, nxv32i8 and nxv16i32.
+; Only the instruction suffix follows the index element width (vlxseg4ei8.v,
+; vlxseg4ei16.v, vlxseg4ei32.v, vlxseg4ei64.v); the data vtype stays e32,m1.
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg4.nxv2f32.nxv2i16(float*, <vscale x 2 x i16>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg4.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg4_nxv2f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2f32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg4.nxv2f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg4_mask_nxv2f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2f32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg4.nxv2f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg4.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
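+; Illustrative sketch only, not part of the generated coverage above: a
+; minimal example of consuming more than one field of an indexed segment
+; load. The function name @sum_first_two_fields and the fadd consumer are
+; hypothetical; the intrinsic and types match the nxv2f32/nxv2i16
+; declarations above.
+define <vscale x 2 x float> @sum_first_two_fields(float* %base, <vscale x 2 x i16> %index, i64 %vl) {
+entry:
+  ; Load a 4-field segment; each field is one <vscale x 2 x float> vector.
+  %ld = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg4.nxv2f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %f0 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %ld, 0
+  %f1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %ld, 1
+  ; Element-wise sum of the first two segment fields.
+  %sum = fadd <vscale x 2 x float> %f0, %f1
+  ret <vscale x 2 x float> %sum
+}
+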
+declare {,,,} @llvm.riscv.vlxseg4.nxv2f32.nxv2i64(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f32.nxv2i64(,,,, float*, , , i64) + +define @test_vlxseg4_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f32.nxv2i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv16i16(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv16i16(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv16i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv16i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv32i16(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv32i16(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv32i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg5_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv32i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv4i32(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv4i32(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv4i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv4i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv16i8(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv16i8(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv16i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv16i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = 
extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv1i64(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv1i64(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv1i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv1i64( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv1i32(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv1i32(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv1i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv1i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv8i16(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv8i16(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv8i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg5_mask_nxv2f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv8i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv4i8(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv4i8(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv4i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv4i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv1i16(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv1i16(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv1i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} 
@llvm.riscv.vlxseg5.mask.nxv2f32.nxv1i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv2i32(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv2i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv8i8(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv8i8(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv8i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv8i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv4i64(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv4i64(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv4i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv4i64(float* 
%base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv4i64( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv64i8(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv64i8(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv64i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv64i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv4i16(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv4i16(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv4i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = 
extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv4i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv8i64(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv8i64(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv8i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv8i64( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv1i8(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv1i8(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv1i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv1i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f32.nxv2i8(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} 
@llvm.riscv.vlxseg5.nxv2f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg5_mask_nxv2f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv8i32(float*, <vscale x 8 x i32>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg5_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg5_mask_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv32i8(float*, <vscale x 32 x i8>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg5_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg5_mask_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv16i32(float*, <vscale x 16 x i32>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg5_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg5_mask_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv2i16(float*, <vscale x 2 x i16>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg5_nxv2f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg5_mask_nxv2f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv2i64(float*, <vscale x 2 x i64>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg5_nxv2f32_nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2f32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg5_mask_nxv2f32_nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2f32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.nxv2f32.nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg5.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv16i16(float*, <vscale x 16 x i16>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg6_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg6_mask_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv32i16(float*, <vscale x 32 x i16>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg6_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg6_mask_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv4i32(float*, <vscale x 4 x i32>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg6_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg6_mask_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv16i8(float*, <vscale x 16 x i8>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg6_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg6_mask_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv1i64(float*, <vscale x 1 x i64>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,
float*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg6_nxv2f32_nxv1i64(float* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv1i64(float* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg6_mask_nxv2f32_nxv1i64(float* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv1i64(float* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv1i64(<vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv1i32(float*, <vscale x 1 x i32>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg6_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg6_mask_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vlxseg6ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, <vscale x 2 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv8i16(float*, <vscale x 8 x i16>, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlxseg6_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlxseg6.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlxseg6_mask_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+;
CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv8i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv4i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv4i8(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv2f32_nxv4i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv4i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv1i16(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv1i16(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv2f32_nxv1i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail 
call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv1i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv2i32(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv2i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv8i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv8i8(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv2f32_nxv8i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv8i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv4i64(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv4i64(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv2f32_nxv4i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv4i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv64i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv64i8(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv2f32_nxv64i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv64i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv4i16(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv4i16(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv2f32_nxv4i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv4i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv8i64(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv8i64(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv2f32_nxv8i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv8i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv1i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv1i8(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv2f32_nxv1i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv1i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv2i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i64) + 
+define @test_vlxseg6_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv2i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv8i32(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv8i32(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv2f32_nxv8i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv8i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv32i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv32i8(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv2f32_nxv32i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg6_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv32i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv16i32(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv16i32(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv2f32_nxv16i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv16i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv2i16(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = 
tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv2i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv2i64(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv2i64(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f32.nxv2i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv16i16(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv16i16(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv16i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv32i16(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv32i16(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv32i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: 
vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv4i32(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv4i32(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv4i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv16i8(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv16i8(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv16i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; 
CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv1i64(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv1i64(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv1i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv1i64( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv1i32(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv1i32(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv1i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + 
%2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv8i16(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv8i16(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv8i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv4i8(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv4i8(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv4i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv1i16(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv1i16(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv1i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, 
a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv2i32(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv8i8(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv8i8(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv8i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, 
e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv4i64(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv4i64(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv4i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv4i64( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv64i8(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv64i8(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv64i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue 
{,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv4i16(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv4i16(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv4i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv8i64(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv8i64(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv8i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv8i64( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv1i8(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv1i8(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv1i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv2i8(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv8i32(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv8i32(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv2f32_nxv8i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
+; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv8i32(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,} %0, 0
+ %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv32i8(float*, , i64)
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv32i8(,,,,,,, float*, , , i64)
+
+define @test_vlxseg7_nxv2f32_nxv32i8(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv32i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg7_mask_nxv2f32_nxv32i8(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv32i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,} %0, 0
+ %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv16i32(float*, , i64)
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv16i32(,,,,,,, float*, , , i64)
+
+define @test_vlxseg7_nxv2f32_nxv16i32(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv16i32(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg7_mask_nxv2f32_nxv16i32(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv16i32(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,} %0, 0
+ %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv2i16(float*, , i64)
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i64)
+
+define @test_vlxseg7_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv2i16(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg7_mask_nxv2f32_nxv2i16(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv2i16(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,} %0, 0
+ %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv2i64(float*, , i64)
+declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv2i64(,,,,,,, float*, , , i64)
+
+define @test_vlxseg7_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv2i64(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg7_mask_nxv2f32_nxv2i64(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f32.nxv2i64(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,} %0, 0
+ %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f32.nxv2i64( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv16i16(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv16i16(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv16i16(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv16i16(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv16i16(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv16i16(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv32i16(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv32i16(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv32i16(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv32i16(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv32i16(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv32i16(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv4i32(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv4i32(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv4i32(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv4i32(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv4i32(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv4i32(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv16i8(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv16i8(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv16i8(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv16i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv16i8(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv16i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv1i64(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv1i64(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv1i64(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv1i64(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv1i64(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv1i64(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv1i32(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv1i32(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv1i32(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv1i32(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv1i32(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv1i32(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv8i16(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv8i16(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv8i16(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv8i16(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv8i16(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv8i16(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv4i8(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv4i8(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv4i8(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv4i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv4i8(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv4i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv1i16(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv1i16(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv1i16(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv1i16(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv1i16(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv1i16(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv2i32(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv2i32(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv2i32(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv2i32(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv8i8(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv8i8(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv8i8(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv8i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv8i8(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv8i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv4i64(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv4i64(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv4i64(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv4i64(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv4i64(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv4i64(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv64i8(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv64i8(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv64i8(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv64i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv64i8(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv64i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv4i16(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv4i16(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv4i16(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv4i16(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv4i16(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv4i16(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv8i64(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv8i64(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv8i64(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv8i64(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv8i64(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv8i64(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv1i8(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv1i8(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv1i8(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv1i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv1i8(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv1i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv2i8(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv2i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv2i8(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv2i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv8i32(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv8i32(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv8i32(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv8i32(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv8i32(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv8i32(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv32i8(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv32i8(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv32i8(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv32i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv32i8(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv32i8(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv16i32(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv16i32(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv16i32(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv16i32(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv16i32(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv16i32(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv2i16(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv2i16(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv2i16(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv2i16(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv2i64(float*, , i64)
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv2i64(,,,,,,,, float*, , , i64)
+
+define @test_vlxseg8_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv2i64(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg8_mask_nxv2f32_nxv2i64(float* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vmv1r.v v7, v1
+; CHECK-NEXT: vmv1r.v v8, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv2f32.nxv2i64(float* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 0
+ %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv2f32.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,,,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv16i16(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv16i16(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv16i16(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv16i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv16i16(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv16i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 0
+ %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv16i16( %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv32i16(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv32i16(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv32i16(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv32i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv32i16(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv32i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 0
+ %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv32i16( %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv4i32(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv4i32(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv4i32(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv4i32(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv4i32(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv4i32(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 0
+ %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv4i32( %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv16i8(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv16i8(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv16i8(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv16i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv16i8(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv16i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 0
+ %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv16i8( %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv1i64(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv1i64(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv1i64(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv1i64(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv1i64(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 0
+ %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv1i64( %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv1i32(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv1i32(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv1i32(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv1i32(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 0
+ %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv1i32( %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv8i16(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv8i16(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv8i16(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv8i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv8i16(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv8i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 0
+ %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv8i16( %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv4i8(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv4i8(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv4i8(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv4i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv4i8(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv4i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 0
+ %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv4i8( %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv1i16(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv1i16(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv1i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv1i16(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv1i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 0
+ %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv1i16( %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv2i32(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv2i32(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv2i32(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv2i32(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv2i32(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv2i32(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 0
+ %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv2i32( %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv8i8(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv8i8(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv8i8(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv8i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv8i8(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv8i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 0
+ %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv8i8( %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv4i64(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv4i64(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv4i64(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv4i64(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv4i64(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv4i64(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 0
+ %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv4i64( %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv64i8(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv64i8(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv64i8(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv64i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv64i8(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv64i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 0
+ %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv64i8( %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv4i16(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv4i16(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv4i16(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv4i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv4i16(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv4i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 0
+ %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv4i16( %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,} %2, 1
+ ret %3
+}
+
+declare {,} @llvm.riscv.vlxseg2.nxv1f16.nxv8i64(half*, , i64)
+declare {,} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv8i64(,, half*, , , i64)
+
+define @test_vlxseg2_nxv1f16_nxv8i64(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f16.nxv8i64(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg2_mask_nxv1f16_nxv8i64(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv1i8(half*, <vscale x 1 x i8>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg2_nxv1f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg2_mask_nxv1f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv2i8(half*, <vscale x 2 x i8>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg2_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg2_mask_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv8i32(half*, <vscale x 8 x i32>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg2_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg2_mask_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
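+; NOTE: The generator emits one unmasked/masked pair per index vector
+; type, so the ei8 pattern above repeats for every i8 index length; only
+; the intrinsic mangling and the declared index type change from test to
+; test.
+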
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv32i8(half*, <vscale x 32 x i8>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg2_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg2_mask_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv16i32(half*, <vscale x 16 x i32>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg2_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg2_mask_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv2i16(half*, <vscale x 2 x i16>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg2_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg2_mask_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
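+; Illustrative sketch (not generated; names are hypothetical): gathering
+; both fields of an array-of-structs of {half, half} through a vector of
+; byte offsets would use the unmasked form directly:
+;   %pair = tail call {<vscale x 1 x half>,<vscale x 1 x half>}
+;       @llvm.riscv.vlxseg2.nxv1f16.nxv1i32(half* %base, <vscale x 1 x i32> %offsets, i64 %vl)
+;   %x = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %pair, 0
+;   %y = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %pair, 1
+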
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv2i64(half*, <vscale x 2 x i64>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg2_nxv1f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg2_mask_nxv1f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.nxv1f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg2.mask.nxv1f16.nxv2i64(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv16i16(half*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg3_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg3_mask_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
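+; NOTE: The vlxseg3 tests (starting with nxv16i16 above) repeat the same
+; matrix with a three-register result tuple (v15_v16_v17 in the unmasked
+; tests), so the masked variant needs two vmv1r.v copies to replicate %1
+; across the merge operand before the tu reload.
+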
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv32i16(half*, <vscale x 32 x i16>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg3_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg3_mask_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv4i32(half*, <vscale x 4 x i32>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg3_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg3_mask_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv16i8(half*, <vscale x 16 x i8>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg3_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg3_mask_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv1i64(half*, <vscale x 1 x i64>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg3_nxv1f16_nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg3_mask_nxv1f16_nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv1i32(half*, <vscale x 1 x i32>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg3_nxv1f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg3_mask_nxv1f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv8i16(half*, <vscale x 8 x i16>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg3_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg3_mask_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv4i8(half*, <vscale x 4 x i8>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg3_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg3_mask_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv1i16(half*, <vscale x 1 x i16>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg3_nxv1f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg3_mask_nxv1f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg3.nxv1f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>}
@llvm.riscv.vlxseg3.mask.nxv1f16.nxv1i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv2i32(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv2i32(,,, half*, , , i64) + +define @test_vlxseg3_nxv1f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv2i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv8i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv8i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv1f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv8i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv4i64(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv4i64(,,, half*, , , i64) + +define @test_vlxseg3_nxv1f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv4i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv64i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv64i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv1f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv64i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv4i16(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv4i16(,,, half*, , , i64) + +define @test_vlxseg3_nxv1f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv4i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv8i64(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv8i64(,,, half*, , , i64) + +define @test_vlxseg3_nxv1f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv8i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv1i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv1i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv2i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv2i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv1f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} 
@llvm.riscv.vlxseg3.mask.nxv1f16.nxv2i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv8i32(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv8i32(,,, half*, , , i64) + +define @test_vlxseg3_nxv1f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv8i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv32i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv32i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv1f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv32i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv16i32(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv16i32(,,, half*, , , i64) + +define @test_vlxseg3_nxv1f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv16i32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv16i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv2i16(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv2i16(,,, half*, , , i64) + +define @test_vlxseg3_nxv1f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv2i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv2i64(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv2i64(,,, half*, , , i64) + +define @test_vlxseg3_nxv1f16_nxv2i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f16.nxv2i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv16i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv16i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv16i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv16i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv32i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv32i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv32i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv4i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv4i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: 
vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv16i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv16i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv1i64(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv1i64(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv1i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv1i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret 
+entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv8i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv8i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv4i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv4i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} 
@llvm.riscv.vlxseg4.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv1i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv2i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv2i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv8i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv8i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv8i8(half* 
%base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv4i64(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv4i64(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv4i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv64i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv64i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv4i16(half*, , i64) 
+declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv4i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv8i64(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv8i64(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv8i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv1i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; 
CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv2i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv2i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv8i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv8i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv32i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv32i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv32i8: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv16i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv16i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv16i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv2i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv2i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: 
vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv2i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv2i64(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv2i64(,,,, half*, , , i64) + +define @test_vlxseg4_nxv1f16_nxv2i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f16.nxv2i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv16i16(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv16i16(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv16i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv16i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv32i16(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv32i16(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv32i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # 
kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv4i32(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv4i32(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv16i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv16i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; 
CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv1i64(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv1i64(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv1i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv1i32(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv8i16(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv8i16(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; 
CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv4i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv4i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv1i16(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: 
vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv2i32(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv2i32(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv8i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv8i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv4i64(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv4i64(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv4i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv64i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv64i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv4i16(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv4i16(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, 
e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv8i64(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv8i64(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv8i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv1i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv2i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv2i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv8i32(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv8i32(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv32i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv32i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv16i32(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv16i32(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv16i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv2i16(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv2i16(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv2i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv2i64(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv2i64(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv1f16_nxv2i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vlxseg5_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f16.nxv2i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv16i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv16i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv16i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv16i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv32i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv32i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv32i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, 
(a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv4i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv4i32(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv16i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv16i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + 
ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv1i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv1i64(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv1i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv1i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv8i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv8i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = 
extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv4i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv4i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv1i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call 
{,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv2i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv2i32(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv8i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv8i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv4i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv4i64(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv4i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv64i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv64i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv4i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv4i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v 
v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv8i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv8i64(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv8i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv1i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv2i8(half*, , 
i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv2i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv8i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv8i32(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv32i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv32i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv32i8(half* 
%base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv16i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv16i32(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv16i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv2i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv2i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv1f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) 
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg6.nxv1f16.nxv2i64(half*, <vscale x 2 x i64>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg6_nxv1f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv1f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg6.nxv1f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg6_mask_nxv1f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv1f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg6ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg6.nxv1f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg6.mask.nxv1f16.nxv2i64(<vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.nxv1f16.nxv16i16(half*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg7_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg7_mask_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.nxv1f16.nxv32i16(half*, <vscale x 32 x i16>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg7_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1,
e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv4i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv4i32(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv16i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv16i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, 
e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv1i64(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv1i64(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv1i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv1i32(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue 
{,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv8i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv8i16(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv4i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv4i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv1i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv2i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv2i32(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv8i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv8i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, 
e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv4i64(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv4i64(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv64i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv64i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 
0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv4i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv4i16(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv8i64(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv8i64(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv1i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, 
a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv2i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv2i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv8i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv8i32(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; 
CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv32i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv32i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv16i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv16i32(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv1f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + 
%2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.nxv1f16.nxv2i16(half*, <vscale x 2 x i16>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg7_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg7_mask_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.nxv1f16.nxv2i64(half*, <vscale x 2 x i64>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg7_nxv1f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv1f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg7ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.nxv1f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlxseg7_mask_nxv1f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv1f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vlxseg7ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vlxseg7ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.nxv1f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg7.mask.nxv1f16.nxv2i64(<vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, <vscale x 1 x half> %1, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg8.nxv1f16.nxv16i16(half*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlxseg8_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:
vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv32i16(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv32i16(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv32i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv4i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv4i32(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv4i32(half* %base, %index, i64 
%vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv16i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv16i8(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv1i64(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv1i64(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli 
a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv1i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv8i16(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv8i16(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = 
extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv4i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv4i8(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv1i16(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv2i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv2i32(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, 
(a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv8i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv8i8(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv4i64(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv4i64(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv64i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv64i8(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv4i16(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv4i16(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v 
v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv8i64(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv8i64(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv1i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv2i8(half*, , i64) 
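+; A sketch of the fully typed form of the vlxseg8.nxv1f16.nxv2i8 declare pair
+; here, assuming the <vscale x N x T> scalable-vector types were dropped in
+; extraction (hence the empty "{,,,,,,,}" aggregates and bare "," parameters);
+; the intrinsic suffix nxv1f16.nxv2i8 encodes them, with <vscale x 1 x half>
+; spelled out eight times in each result aggregate:
+;
+;   declare {<vscale x 1 x half>, ...} @llvm.riscv.vlxseg8.nxv1f16.nxv2i8(
+;     half*, <vscale x 2 x i8>, i64)
+;   declare {<vscale x 1 x half>, ...} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv2i8(
+;     eight <vscale x 1 x half> maskedoff values, half*, <vscale x 2 x i8>,
+;     <vscale x 1 x i1>, i64)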
+declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv2i8(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv8i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv8i32(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv32i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv32i8(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret 
+entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv16i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv16i32(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv2i16(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv2i16(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; 
CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv2i64(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv2i64(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv1f16_nxv2i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv1f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv1f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv1f16.nxv2i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv16i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv16i16(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv16i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv16i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + 
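+; Each masked test follows the shape checked above: an unmasked vlxseg load
+; under a tail-agnostic vsetvli (ta,mu) seeds every maskedoff operand with the
+; same segment field, then the masked load reruns under a tail-undisturbed
+; vsetvli (tu,mu) with v0.t, and field 1 of the result is copied back out.
+; A minimal fully typed sketch for the nxv1f32/nxv16i16 case, assuming the
+; <vscale x N x T> types were dropped in extraction:
+;
+;   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>}
+;          @llvm.riscv.vlxseg2.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl)
+;   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+;   %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>}
+;          @llvm.riscv.vlxseg2.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %1, <vscale x 1 x float> %1,
+;            float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+;   %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1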
+declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv32i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv32i16(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv32i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv32i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv4i32(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv4i32(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv4i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv4i32( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv16i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv16i8(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv16i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), 
v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.nxv1f32.nxv1i64(float*, <vscale x 1 x i64>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg2_nxv1f32_nxv1i64(float* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.nxv1f32.nxv1i64(float* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg2_mask_nxv1f32_nxv1i64(float* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.nxv1f32.nxv1i64(float* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.nxv1f32.nxv1i32(float*, <vscale x 1 x i32>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg2_nxv1f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.nxv1f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg2_mask_nxv1f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.nxv1f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.nxv1f32.nxv8i16(float*, <vscale x 8 x i16>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg2_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg2.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define
@test_vlxseg2_mask_nxv1f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv8i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv4i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv4i8(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv4i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv4i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv1i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv1i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv2i32(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv2i32(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv2i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv2i32( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv8i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv8i8(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv8i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv8i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv4i64(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv4i64(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv4i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv4i64( %1, %1, float* %base, %index, %mask, i64 %vl) 
+ %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv64i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv64i8(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv64i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv64i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv4i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv4i16(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv4i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv4i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv8i64(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv8i64(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv8i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; 
CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv8i64( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv1i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv1i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv2i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv2i8(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv2i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv2i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv8i32(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv8i32(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv8i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define 
@test_vlxseg2_mask_nxv1f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv8i32( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv32i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv32i8(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv32i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv32i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv16i32(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv16i32(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv16i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv16i32( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv2i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv2i16(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv2i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv2i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv2i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv1f32.nxv2i64(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv2i64(,, float*, , , i64) + +define @test_vlxseg2_nxv1f32_nxv2i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv1f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv1f32.nxv2i64( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv16i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv16i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv16i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} 
@llvm.riscv.vlxseg3.mask.nxv1f32.nxv16i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv32i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv32i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv32i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv32i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv4i32(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv4i32(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv4i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv4i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv16i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv16i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv16i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg3_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv16i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv1i64(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv1i64(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv1i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv1i32(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv1i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv8i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv8i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv8i16(float* %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vlxseg3_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv8i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv4i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv4i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv4i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv4i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv1i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} 
@llvm.riscv.vlxseg3.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv1i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv2i32(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv2i32(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv2i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv2i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv8i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv8i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv8i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv8i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv4i64(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv4i64(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv4i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg3_mask_nxv1f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv4i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv64i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv64i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv64i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv64i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv4i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv4i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv4i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv4i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv8i64(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv8i64(,,, float*, , , 
i64) + +define @test_vlxseg3_nxv1f32_nxv8i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv8i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv1i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv1i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv2i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv2i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv2i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: 
vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv2i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv8i32(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv8i32(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv8i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv8i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv32i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv32i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv32i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv32i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv16i32(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv16i32(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv16i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv16i32(float* %base, 
%index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv16i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv2i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv2i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv2i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv2i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv2i64(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv2i64(,,, float*, , , i64) + +define @test_vlxseg3_nxv1f32_nxv2i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv1f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv1f32.nxv2i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv16i16(float*, , 
i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv16i16(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv16i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv16i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv32i16(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv32i16(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv32i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv32i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv4i32(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv4i32(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv4i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu 
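+; The mnemonic encodes the index element width (ei8/ei16/ei32/ei64, here
+; ei32 for the <vscale x 4 x i32> index), while the e32,mf2 vtype comes from
+; the f32 data type, so one data type pairs with every index width.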
+; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv4i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv16i8(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv16i8(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv16i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv16i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv1i64(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv1i64(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv1i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv1i32(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv1i32(float* %base, %index, 
i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv1i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv8i16(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv8i16(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv8i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv8i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv4i8(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv4i8(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv4i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, 
e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv4i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv1i16(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv1i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv2i32(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv2i32(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv2i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv2i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv8i8(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv8i8(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv8i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, 
(a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv8i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv4i64(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv4i64(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv4i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv4i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv64i8(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv64i8(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv64i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} 
@llvm.riscv.vlxseg4.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv64i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv4i16(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv4i16(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv4i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv4i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv8i64(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv8i64(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv8i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv8i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv1i8(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} 
@llvm.riscv.vlxseg4.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv1i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv2i8(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv2i8(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv2i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv2i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv8i32(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv8i32(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv8i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv8i32( 
%1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv32i8(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv32i8(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv32i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv32i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv16i32(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv16i32(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv16i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv16i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv2i16(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv2i16(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv2i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv2i16(float* %base, 
%index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv2i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv2i64(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv2i64(,,,, float*, , , i64) + +define @test_vlxseg4_nxv1f32_nxv2i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv1f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv1f32.nxv2i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv16i16(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv16i16(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv1f32_nxv16i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv16i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 
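+  ; Codegen pattern shared by the masked tests: an unmasked vlxseg under a
+  ; tail-agnostic (ta) vsetvli builds the merge tuple, vmv1r.v copies place
+  ; it in the destination register group, and the masked vlxseg (v0.t) then
+  ; runs under tu,mu so inactive and tail lanes keep those merge values.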
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv32i16(float*, <vscale x 32 x i16>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv4i32(float*, <vscale x 4 x i32>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv16i8(float*, <vscale x 16 x i8>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float>
@test_vlxseg5_mask_nxv1f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv16i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv1i64(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv1i64(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv1i64( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv1i32(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} 
@llvm.riscv.vlxseg5.mask.nxv1f32.nxv1i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv8i16(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv8i16(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv1f32_nxv8i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv8i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv4i8(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv4i8(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv1f32_nxv4i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv1f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv4i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv1f32.nxv1i16(float*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i64) + +define @test_vlxseg5_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} 
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv2i32(float*, <vscale x 2 x i32>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv8i8(float*, <vscale x 8 x i8>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv4i64(float*, <vscale x 4 x i64>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv4i64(float* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv4i64(float* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv4i64(float* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv4i64(float* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv4i64(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv64i8(float*, <vscale x 64 x i8>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv4i16(float*, <vscale x 4 x i16>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv8i64(float*, <vscale x 8 x i64>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv8i64(float* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv8i64(float* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv8i64(float* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv8i64(float* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv8i64(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv1i8(float*, <vscale x 1 x i8>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv2i8(float*, <vscale x 2 x i8>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv8i32(float*, <vscale x 8 x i32>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv32i8(float*, <vscale x 32 x i8>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv16i32(float*, <vscale x 16 x i32>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv2i16(float*, <vscale x 2 x i16>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv2i64(float*, <vscale x 2 x i64>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg5_nxv1f32_nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv1f32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg5_mask_nxv1f32_nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv1f32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg5ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.nxv1f32.nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg5.mask.nxv1f32.nxv2i64(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg6.nxv1f32.nxv16i16(float*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg6_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg6.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg6_mask_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg6.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg6.nxv1f32.nxv32i16(float*, <vscale x 32 x i16>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg6_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16>
%index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv32i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv4i32(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv4i32(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv4i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv4i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv16i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv16i8(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv16i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv16i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv1i64(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv1i64(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv1i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv1i32(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} 
@llvm.riscv.vlxseg6.mask.nxv1f32.nxv1i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv8i16(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv8i16(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv8i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv8i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv4i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv4i8(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv4i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv4i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv1i16(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv1i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv2i32(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv2i32(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv2i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv2i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv8i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv8i8(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv8i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv8i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv4i64(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv4i64(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv4i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv4i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv64i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv64i8(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv64i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv64i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv4i16(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv4i16(,,,,,, float*, 
, , i64) + +define @test_vlxseg6_nxv1f32_nxv4i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv4i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv8i64(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv8i64(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv8i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv8i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv1i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg6_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv1i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv2i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv2i8(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv2i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv2i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv8i32(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv8i32(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv8i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} 
@llvm.riscv.vlxseg6.mask.nxv1f32.nxv8i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv32i8(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv32i8(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv32i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv32i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv16i32(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv16i32(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv16i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv16i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv2i16(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv2i16(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv2i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed 
$v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv2i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv2i64(float*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv2i64(,,,,,, float*, , , i64) + +define @test_vlxseg6_nxv1f32_nxv2i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv1f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv1f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv1f32.nxv2i64( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv1f32.nxv16i16(float*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv1f32.nxv16i16(,,,,,,, float*, , , i64) + +define @test_vlxseg7_nxv1f32_nxv16i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv1f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv1f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, 
v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg7.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg7.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg7.nxv1f32.nxv32i16(float*, <vscale x 32 x i16>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg7.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg7_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv1f32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg7.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg7_mask_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv1f32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg7.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg7.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
[Unmasked/masked vlxseg7 test pairs of exactly this shape follow for the remaining index types: nxv4i32, nxv16i8, nxv1i64, nxv1i32, nxv8i16, nxv4i8, nxv1i16, nxv2i32, nxv8i8, nxv4i64, nxv64i8, nxv4i16, nxv8i64, nxv1i8, nxv2i8, nxv8i32, nxv32i8, nxv16i32, nxv2i16 and nxv2i64; only the index operand type and the ei8/ei16/ei32/ei64 suffix of the expected vlxseg7 instruction vary.]
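Extraction stripped the scalable-vector types from the IR above; they are implied by the intrinsic name suffixes (nxv1f32 is <vscale x 1 x float> data, nxvNiM the index vector, and the mask is <vscale x 1 x i1>). As a minimal fully typed sketch, assuming the nxv1i16 index flavour that these tests also cover, an unmasked call and a single-field extract look like this; treat the exact spellings as reconstructed rather than authoritative:

; Sketch only: types reconstructed from the vlxseg7.nxv1f32.nxv1i16 suffixes.
declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg7.nxv1f32.nxv1i16(float*, <vscale x 1 x i16>, i64)

define <vscale x 1 x float> @sample_vlxseg7(float* %base, <vscale x 1 x i16> %index, i64 %vl) {
entry:
  ; Each index element gives the byte offset of one segment's start; the
  ; seven fields of that segment are then read from consecutive element
  ; slots (per the V extension's indexed segment loads).
  %seg = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg7.nxv1f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i64 %vl)
  ; Individual fields come back out with extractvalue, as the tests above do.
  %field1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %seg, 1
  ret <vscale x 1 x float> %field1
}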
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg8.nxv1f32.nxv16i16(float*, <vscale x 16 x i16>, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg8.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlxseg8_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv1f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg8.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlxseg8_mask_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv1f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg8.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg8.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, <vscale x 1 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
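The masked variants take one merge operand per field ahead of the base pointer, index vector, mask and vl, and every masked test passes the first field of a preceding unmasked load in all of the merge slots; that is why the expected code materializes the destination register tuple with a chain of vmv1r.v copies and re-issues vsetvli with tu (tail undisturbed) before the masked reload. A minimal fully typed sketch of one masked call, using the nxv1i8 index flavour; the types are again reconstructed from the intrinsic name suffixes, so read them as illustrative:

; Sketch only: operand layout mirrors the masked test calls above.
declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg8.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)

define <vscale x 1 x float> @sample_vlxseg8_mask(<vscale x 1 x float> %merge, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
entry:
  ; All eight merge slots reuse %merge here, just as the tests reuse %1;
  ; masked-off elements of each field keep the corresponding merge value.
  %seg = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlxseg8.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %merge, <vscale x 1 x float> %merge, <vscale x 1 x float> %merge, <vscale x 1 x float> %merge, <vscale x 1 x float> %merge, <vscale x 1 x float> %merge, <vscale x 1 x float> %merge, <vscale x 1 x float> %merge, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
  %field1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %seg, 1
  ret <vscale x 1 x float> %field1
}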
+
[Unmasked/masked vlxseg8 test pairs of exactly this shape follow for the remaining index types: nxv32i16, nxv4i32, nxv16i8, nxv1i64, nxv1i32, nxv8i16, nxv4i8, nxv1i16, nxv2i32, nxv8i8, nxv4i64, nxv64i8, nxv4i16, nxv8i64, nxv1i8, nxv2i8, nxv8i32, nxv32i8, nxv16i32, nxv2i16 and nxv2i64; only the index operand type and the ei8/ei16/ei32/ei64 suffix of the expected vlxseg8 instruction vary.]
+
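The tests below switch to <vscale x 8 x half> data, which at SEW=16 occupies a two-register group (LMUL=2), so the expected tuples live in even-numbered register pairs (v2m2, v4m2, v14m2, v16m2) and whole-group copies use vmv2r.v rather than vmv1r.v. A fully typed sketch of the two-field form; the nxv8i16 index flavour used here is a hypothetical example chosen for brevity, with types reconstructed from the name suffixes:

; Sketch only: two-field indexed segment load of nxv8f16 data.
declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv8i16(half*, <vscale x 8 x i16>, i64)

define <vscale x 8 x half> @sample_vlxseg2(half* %base, <vscale x 8 x i16> %index, i64 %vl) {
entry:
  ; Two fields per segment; each returned value spans an m2 register group.
  %seg = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i64 %vl)
  %field0 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %seg, 0
  ret <vscale x 8 x half> %field0
}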
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv16i16(half*, <vscale x 16 x i16>, i64)
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv16i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x half> @test_vlxseg2_nxv8f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlxseg2_mask_nxv8f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv16i16(<vscale x 8 x half> %1, <vscale x 8 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv32i16(half*, <vscale x 32 x i16>, i64)
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv32i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x half> @test_vlxseg2_nxv8f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlxseg2_mask_nxv8f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv32i16(<vscale x 8 x half> %1, <vscale x 8 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv4i32(half*, <vscale x 4 x i32>, i64)
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv4i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x half> @test_vlxseg2_nxv8f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlxseg2_mask_nxv8f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv4i32(<vscale x 8 x half> %1, <vscale x 8 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv16i8(half*, <vscale x 16 x i8>, i64)
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv16i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x half> @test_vlxseg2_nxv8f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlxseg2_mask_nxv8f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv16i8(<vscale x 8 x half> %1, <vscale x 8 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv1i64(half*, <vscale x 1 x i64>, i64)
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv1i64(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x half> @test_vlxseg2_nxv8f16_nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlxseg2_mask_nxv8f16_nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv1i64(<vscale x 8 x half> %1, <vscale x 8 x half> %1, half* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv1i32(half*, <vscale x 1 x i32>, i64)
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv1i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x half> @test_vlxseg2_nxv8f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg2.nxv8f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> 
@test_vlxseg2_mask_nxv8f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv1i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv8i16(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv8i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv4i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv4i8(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv4i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv1i16(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv1i16(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv1i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv2i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv2i32(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv2i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv8i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv8i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = 
extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv4i64(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv4i64(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv4i64( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv64i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv64i8(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv64i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv4i16(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv4i16(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: 
vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv4i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv8i64(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv8i64(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv8i64( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv1i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv1i8(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv1i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv2i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv2i8(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define 
@test_vlxseg2_mask_nxv8f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv2i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv8i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv8i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv32i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv32i8(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv32i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv16i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv16i32(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv16i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv2i16(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv2i16(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv2i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f16.nxv2i64(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv2i64(,, half*, , , i64) + +define @test_vlxseg2_nxv8f16_nxv2i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f16.nxv2i64( %1, %1, half* %base, %index, %mask, 
i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv16i16(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv16i16(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv16i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv16i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv32i16(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv32i16(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv32i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv32i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv4i32(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv4i32(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, 
a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv4i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv16i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv16i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv16i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv1i64(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv1i64(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv1i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv1i32(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv1i32(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv1i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv8i16(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv8i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv4i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv4i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = 
tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv4i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv1i16(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv1i16(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv1i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv2i32(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv2i32(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv2i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv8i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg3_mask_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv8i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv4i64(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv4i64(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv4i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv64i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv64i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv64i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv4i16(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv4i16(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vlxseg3_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv4i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv8i64(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv8i64(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv8i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv1i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv1i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} 
@llvm.riscv.vlxseg3.nxv8f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv1i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv2i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv2i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv2i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv8i32(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv8i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv32i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv32i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define 
@test_vlxseg3_mask_nxv8f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv32i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv16i32(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv16i32(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv16i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv2i16(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv2i16(,,, half*, , , i64) + +define @test_vlxseg3_nxv8f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv2i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv2i64(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv2i64(,,, half*, , , 
i64) + +define @test_vlxseg3_nxv8f16_nxv2i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv8f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv8f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv8f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv8f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv8f16.nxv2i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv16i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv16i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv16i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv16i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv32i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv32i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv32i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; 
CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv32i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv4i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv4i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv4i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv16i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv16i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv16i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv1i64(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv1i64(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv1i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv1i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv1i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv1i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv8i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v 
v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv8i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv4i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv4i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv4i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv1i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv1i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv1i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv2i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv2i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv2i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv8i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv8i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv4i64(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv4i64(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail 
call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv4i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv64i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv64i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv64i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv4i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv4i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv4i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv8i64(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv8i64(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + 
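+; Note: every masked test in this file reuses segment 0 of an unmasked vlxseg
+; result as all of the maskedoff operands of the masked intrinsic, so the
+; second, tail-undisturbed (tu) indexed load starts from defined registers;
+; segment 1 of the masked result is then returned and checked.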
+define <vscale x 8 x half> @test_vlxseg4_mask_nxv8f16_nxv8i64(half* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg4.nxv8f16.nxv8i64(half* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %1, <vscale x 8 x half> %1, <vscale x 8 x half> %1, <vscale x 8 x half> %1, half* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg4.nxv8f16.nxv1i8(half*, <vscale x 1 x i8>, i64)
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv1i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x half> @test_vlxseg4_nxv8f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg4.nxv8f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlxseg4_mask_nxv8f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg4.nxv8f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv1i8(<vscale x 8 x half> %1, <vscale x 8 x half> %1, <vscale x 8 x half> %1, <vscale x 8 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg4.nxv8f16.nxv2i8(half*, <vscale x 2 x i8>, i64)
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv2i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x half> @test_vlxseg4_nxv8f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg4.nxv8f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlxseg4_mask_nxv8f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg4.nxv8f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv2i8(<vscale x 8 x half> %1, <vscale x 8 x half> %1, <vscale x 8 x half> %1, <vscale x 8 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>}
@llvm.riscv.vlxseg4.nxv8f16.nxv8i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv8i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv32i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv32i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv32i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv16i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv16i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv16i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv2i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv2i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv2i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv2i64(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv2i64(,,,, half*, , , i64) + +define @test_vlxseg4_nxv8f16_nxv2i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv8f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv8f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv8f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv8f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv8f16.nxv2i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv16i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv16i16(,, float*, , , i64) + +define 
@test_vlxseg2_nxv8f32_nxv16i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv16i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv32i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv32i16(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv32i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv32i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv4i32(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv4i32(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv4i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} 
@llvm.riscv.vlxseg2.nxv8f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv4i32( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv16i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv16i8(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv16i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv16i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv1i64(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv1i64(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv1i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv1i64( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv1i32(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv1i32(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv1i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg2_mask_nxv8f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv1i32( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv8i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv8i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv8i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv4i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv4i8(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv4i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv4i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv1i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv1i16(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv1i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, 
(a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv1i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv2i32(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv2i32(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv2i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv2i32( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv8i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv8i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv8i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} 
@llvm.riscv.vlxseg2.nxv8f32.nxv4i64(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv4i64(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv4i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv4i64( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv64i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv64i8(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv64i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv64i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv4i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv4i16(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv4i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, 
v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv4i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv8i64(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv8i64(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv8i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv8i64( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv1i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv1i8(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv1i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv1i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv2i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv2i8(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv2i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define 
@test_vlxseg2_mask_nxv8f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv2i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv8i32(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv8i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv8i32( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv32i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv32i8(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv32i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv32i8( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv16i32(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv16i32(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv16i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv16i32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv16i32( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv2i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv2i16(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv2i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv2i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv8f32.nxv2i64(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv8f32.nxv2i64(,, float*, , , i64) + +define @test_vlxseg2_nxv8f32_nxv2i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv8f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v12, (a0), v16 +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv8f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv8f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16 +; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v4, (a0), v16, v0.t +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv8f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} 
@llvm.riscv.vlxseg2.mask.nxv8f32.nxv2i64( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f64.nxv16i16(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv16i16(,, double*, , , i64) + +define @test_vlxseg2_nxv2f64_nxv16i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f64.nxv16i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f64_nxv16i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f64.nxv16i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv16i16( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f64.nxv32i16(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv32i16(,, double*, , , i64) + +define @test_vlxseg2_nxv2f64_nxv32i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f64.nxv32i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f64_nxv32i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f64.nxv32i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv32i16( %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f64.nxv4i32(double*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv4i32(,, double*, , , i64) + +define @test_vlxseg2_nxv2f64_nxv4i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f64.nxv4i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f64_nxv4i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv4i32(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv16i8(double*, <vscale x 16 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv16i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv1i64(double*, <vscale x 1 x i64>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv1i64(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv1i64(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv1i32(double*, <vscale x 1 x i32>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv1i32(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv8i16(double*, <vscale x 8 x i16>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv8i16(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv4i8(double*, <vscale x 4 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv4i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv1i16(double*, <vscale x 1 x i16>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv1i16(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv2i32(double*, <vscale x 2 x i32>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv8i8(double*, <vscale x 8 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv8i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv4i64(double*, <vscale x 4 x i64>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv4i64(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv4i64(double* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv4i64(double* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv4i64(double* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv4i64(double* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv4i64(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv64i8(double*, <vscale x 64 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv64i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv4i16(double*, <vscale x 4 x i16>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv4i16(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv8i64(double*, <vscale x 8 x i64>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv8i64(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv8i64(double* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv8i64(double* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv8i64(double* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv8i64(double* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv8i64(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv1i8(double*, <vscale x 1 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv1i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv2i8(double*, <vscale x 2 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv8i32(double*, <vscale x 8 x i32>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv8i32(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv32i8(double*, <vscale x 32 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv32i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv16i32(double*, <vscale x 16 x i32>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv16i32(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv2i16(double*, <vscale x 2 x i16>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv2i64(double*, <vscale x 2 x i64>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv2i64(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg2_nxv2f64_nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg2_mask_nxv2f64_nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.nxv2f64.nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg2.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv16i16(double*, <vscale x 16 x i16>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv16i16(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv32i16(double*, <vscale x 32 x i16>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv32i16(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv4i32(double*, <vscale x 4 x i32>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv4i32(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv16i8(double*, <vscale x 16 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv16i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv1i64(double*, <vscale x 1 x i64>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv1i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv1i64(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv1i32(double*, <vscale x 1 x i32>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv1i32(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv8i16(double*, <vscale x 8 x i16>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv8i16(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv4i8(double*, <vscale x 4 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv4i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv1i16(double*, <vscale x 1 x i16>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv1i16(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv2i32(double*, <vscale x 2 x i32>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv8i8(double*, <vscale x 8 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv8i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv4i64(double*, <vscale x 4 x i64>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv4i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv4i64(double* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv4i64(double* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv4i64(double* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv4i64(double* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv4i64(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv64i8(double*, <vscale x 64 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv64i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv4i16(double*, <vscale x 4 x i16>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv4i16(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv8i64(double*, <vscale x 8 x i64>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv8i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv8i64(double* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv8i64(double* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv8i64(double* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv8i64(double* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv8i64(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv1i8(double*, <vscale x 1 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv1i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv2i8(double*, <vscale x 2 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv8i32(double*, <vscale x 8 x i32>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv8i32(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv32i8(double*, <vscale x 32 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv32i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv16i32(double*, <vscale x 16 x i32>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv16i32(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv2i16(double*, <vscale x 2 x i16>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv2i64(double*, <vscale x 2 x i64>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv2i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg3_nxv2f64_nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg3_mask_nxv2f64_nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.nxv2f64.nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg3.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv16i16(double*, <vscale x 16 x i16>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg4_nxv2f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg4_mask_nxv2f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv16i16(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv32i16(double*, <vscale x 32 x i16>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg4_nxv2f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg4_mask_nxv2f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv32i16(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv4i32(double*, <vscale x 4 x i32>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg4_nxv2f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg4_mask_nxv2f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv4i32(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv16i8(double*, <vscale x 16 x i8>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg4_nxv2f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg4_mask_nxv2f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+define <vscale x 2 x double> @test_vlxseg4_nxv2f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg4_mask_nxv2f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv16i8(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv1i64(double*, <vscale x 1 x i64>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv1i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg4_nxv2f64_nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg4_mask_nxv2f64_nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv1i64(double* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv1i64(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv1i32(double*, <vscale x 1 x i32>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg4_nxv2f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg4_mask_nxv2f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv1i32(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv8i16(double*, <vscale x 8 x i16>, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlxseg4_nxv2f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i64 %vl) {
+;
CHECK-LABEL: test_vlxseg4_nxv2f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv8i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv8i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv8i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv8i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv4i8(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv4i8(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv4i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv4i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv4i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv4i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv4i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv1i16(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv1i16(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv1i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv1i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv1i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; 
CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv1i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv1i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv2i32(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv2i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv2i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv8i8(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv8i8(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv8i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv8i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv8i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv8i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv8i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv4i64(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv4i64(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv4i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv4i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv4i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv4i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv4i64( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv64i8(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv64i8(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv64i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv64i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv64i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv64i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv64i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv4i16(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv4i16(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv4i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv4i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv4i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; 
CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv4i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv4i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv8i64(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv8i64(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv8i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv8i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv8i64(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv8i64(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv8i64( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv1i8(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv1i8(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv1i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv1i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv1i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv1i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv1i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv2i8(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 
killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv2i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv2i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv8i32(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv8i32(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv8i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv8i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv8i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv8i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv8i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv32i8(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv32i8(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv32i8(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv32i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv32i8(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} 
@llvm.riscv.vlxseg4.nxv2f64.nxv32i8(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv32i8( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv16i32(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv16i32(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv16i32(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv16i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv16i32(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv16i32(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv16i32( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv2i16(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f64_nxv2i16(double* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv2i16( %1, %1, %1, %1, double* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f64.nxv2i64(double*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv2i64(,,,, double*, , , i64) + +define @test_vlxseg4_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; 
CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlxseg4_mask_nxv2f64_nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2f64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.nxv2f64.nxv2i64(double* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlxseg4.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, <vscale x 2 x double> %1, double* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg2.nxv4f16.nxv16i16(half*, <vscale x 16 x i16>, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlxseg2_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg2.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlxseg2_mask_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg2.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg2.nxv4f16.nxv32i16(half*, <vscale x 32 x i16>, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlxseg2_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg2.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlxseg2_mask_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg2.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>}
@llvm.riscv.vlxseg2.nxv4f16.nxv4i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv4i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv16i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv16i8(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv16i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv1i64(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv1i64(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv1i64( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv1i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv1i32(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv1i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv8i16(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv8i16(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv8i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv4i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg2_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv4i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv1i16(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv1i16(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv1i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv2i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv2i32(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv2i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv8i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv8i8(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: 
def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv8i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv4i64(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv4i64(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv4i64( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv64i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv64i8(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv64i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv4i16(half*, , i64) +declare {,} 
@llvm.riscv.vlxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv4i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv8i64(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv8i64(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv8i64( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv1i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv1i8(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} 
@llvm.riscv.vlxseg2.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv1i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv2i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv2i8(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv2i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv8i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv8i32(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv8i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv32i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv32i8(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv32i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv16i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv16i32(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv16i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv2i16(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv2i16(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv2i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f16.nxv2i64(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv2i64(,, half*, , , i64) + +define @test_vlxseg2_nxv4f16_nxv2i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; 
CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg2.nxv4f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlxseg2_mask_nxv4f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg2.nxv4f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg2.mask.nxv4f16.nxv2i64(<vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg3.nxv4f16.nxv16i16(half*, <vscale x 16 x i16>, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg3.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlxseg3_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg3.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlxseg3_mask_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg3.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg3.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg3.nxv4f16.nxv32i16(half*, <vscale x 32 x i16>, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg3.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlxseg3_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg3.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlxseg3_mask_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg3.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg3.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>}
@llvm.riscv.vlxseg3.nxv4f16.nxv4i32(half*, <vscale x 4 x i32>, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlxseg3_nxv4f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg3.nxv4f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlxseg3_mask_nxv4f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg3.nxv4f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+; The vlxseg3 tests for the remaining index types (nxv16i8, nxv1i64, nxv1i32,
+; nxv8i16, nxv4i8, nxv1i16, nxv2i32, nxv8i8, nxv4i64, nxv64i8, nxv4i16,
+; nxv8i64, nxv1i8, nxv2i8, nxv8i32, nxv32i8, nxv16i32, nxv2i16 and nxv2i64)
+; follow the same pattern, selecting vlxseg3ei8.v/ei16.v/ei32.v/ei64.v
+; according to the index element width.
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg4.nxv4f16.nxv16i16(half*, <vscale x 16 x i16>, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg4.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlxseg4_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg4.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlxseg4_mask_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg4.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg4.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+; The vlxseg4 tests for the remaining index types (nxv32i16, nxv4i32, nxv16i8,
+; nxv1i64, nxv1i32, nxv8i16, nxv4i8, nxv1i16, nxv2i32, nxv8i8, nxv4i64,
+; nxv64i8, nxv4i16, nxv8i64, nxv1i8, nxv2i8, nxv8i32, nxv32i8, nxv16i32,
+; nxv2i16 and nxv2i64) follow the same pattern.
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg5.nxv4f16.nxv16i16(half*, <vscale x 16 x i16>, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlxseg5_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg5.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlxseg5_mask_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg5.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+; The vlxseg5 tests for index types nxv32i16, nxv4i32, nxv16i8, nxv1i64,
+; nxv1i32 and nxv8i16 follow the same pattern; the nxv4i8 tests begin below.
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg5.nxv4f16.nxv4i8(half*, <vscale x 4 x i8>, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlxseg5_nxv4f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv4i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv1i16(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv1i16(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv4f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv1i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv2i32(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv2i32(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv4f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} 
@llvm.riscv.vlxseg5.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv2i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv8i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv8i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv4f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv8i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv4i64(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv4i64(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv4i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv64i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv64i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv4f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv64i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv4i16(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv4i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv8i64(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv8i64(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv4f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret 
+entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv8i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv1i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv1i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv4f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv1i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv2i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv2i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv4f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv2i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv8i32(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv8i32(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv4f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 
killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv8i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv32i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv32i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv4f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv32i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv16i32(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv16i32(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv4f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; 
CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv16i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv2i16(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv2i16(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv4f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv2i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv2i64(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv2i64(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv4f16_nxv2i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv4f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv4f16.nxv2i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv16i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv16i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv16i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), 
v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv16i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv32i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv32i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv32i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv32i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv4i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; 
CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv4i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv16i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv16i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv16i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv1i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv1i64(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv1i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv1i32(half*, , i64) +declare {,,,,,} 
@llvm.riscv.vlxseg6.mask.nxv4f16.nxv1i32(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv1i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv8i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv8i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv8i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv4i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv4i8(half* %base, %index, i64 %vl, 
%mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv4i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv1i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv1i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv1i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv2i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv2i32(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = 
tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv2i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv8i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv8i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv8i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv4i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv4i64(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv4i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv64i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv64i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv64i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv4i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv4i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv8i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv8i64(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: 
vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv8i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv1i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv1i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv1i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv2i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv2i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv2i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv8i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv8i32(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv8i32(half* 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv8i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv32i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv32i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv32i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv16i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv16i32(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv16i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv2i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv2i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv2i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv2i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv2i64(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv4f16_nxv2i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv4f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv4f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv4f16.nxv2i64( %1, %1, %1, %1, %1, %1, 
half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv16i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv16i16(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv16i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv32i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv32i16(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv32i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv4i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: 
def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv16i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv16i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv1i64(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv1i64(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv1i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv1i32(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv8i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv8i16(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv8i16( %1, %1, %1, %1, %1, 
%1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv4i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv1i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv1i16(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv2i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv2i32(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 
killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv8i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv8i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv4i64(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv4i64(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv64i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv64i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv4i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, 
%mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv8i64(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv8i64(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv1i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv1i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv2i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv2i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 
+; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv8i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv8i32(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv32i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv32i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: 
vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv16i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv16i32(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv2i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv2i16(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv4f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv4f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv4f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = 
extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg7.nxv4f16.nxv2i64(half*, <vscale x 2 x i64>, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlxseg7_nxv4f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv4f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg7ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg7.nxv4f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlxseg7_mask_nxv4f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv4f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg7ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg7ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg7.nxv4f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg7.mask.nxv4f16.nxv2i64(<vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.nxv4f16.nxv16i16(half*, <vscale x 16 x i16>, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlxseg8_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlxseg8_mask_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, <vscale x 4 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.nxv4f16.nxv32i16(half*, <vscale x 32 x i16>, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlxseg8_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill:
def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv4i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv16i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv16i8(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv1i64(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv1i64(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv1i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv1i32(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret 
+entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv8i16(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv8i16(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv4i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv1i16(half*, , i64) +declare {,,,,,,,} 
@llvm.riscv.vlxseg8.mask.nxv4f16.nxv1i16(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv2i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv2i32(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv8i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv8i8(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail 
call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv4i64(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv4i64(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv64i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv64i8(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: 
vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv4i16(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv8i64(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv8i64(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = 
extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv8i64( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv1i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv1i8(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv2i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv2i8(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv8i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv8i32(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv8i32(half* %base, %index, i64 %vl) { 
+; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv32i8(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv32i8(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg8_mask_nxv4f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu +; CHECK-NEXT: vlxseg8ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 0 + %2 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv16i32(half*, , i64) +declare {,,,,,,,} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv16i32(,,,,,,,, half*, , , i64) + +define @test_vlxseg8_nxv4f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vlxseg8ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,,} @llvm.riscv.vlxseg8.nxv4f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 +} + 
+define <vscale x 4 x half> @test_vlxseg8_mask_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.nxv4f16.nxv2i16(half*, <vscale x 2 x i16>, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlxseg8_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlxseg8_mask_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.nxv4f16.nxv2i64(half*, <vscale x 2 x i64>, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlxseg8_nxv4f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv4f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.nxv4f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlxseg8_mask_nxv4f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv4f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    
vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.nxv4f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlxseg8.mask.nxv4f16.nxv2i64(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv16i16(half*, <vscale x 16 x i16>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg2_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg2_mask_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv32i16(half*, <vscale x 32 x i16>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg2_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg2_mask_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv4i32(half*, <vscale x 4 x i32>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg2_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    
ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg2_mask_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv16i8(half*, <vscale x 16 x i8>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg2_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg2_mask_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv1i64(half*, <vscale x 1 x i64>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg2_nxv2f16_nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg2_mask_nxv2f16_nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv1i64(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv1i32(half*, <vscale x 1 x i32>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg2_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg2_mask_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv8i16(half*, <vscale x 8 x i16>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg2_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg2_mask_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv4i8(half*, <vscale x 4 x i8>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg2_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg2_mask_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = 
extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv1i16(half*, <vscale x 1 x i16>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg2_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg2_mask_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv2i32(half*, <vscale x 2 x i32>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg2_nxv2f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg2_mask_nxv2f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv8i8(half*, <vscale x 8 x i8>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg2_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg2_mask_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    
vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv4i64(half*, <vscale x 4 x i64>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg2_nxv2f16_nxv4i64(half* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv4i64(half* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg2_mask_nxv2f16_nxv4i64(half* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv4i64(half* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv4i64(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv64i8(half*, <vscale x 64 x i8>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg2_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg2_mask_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.nxv2f16.nxv4i16(half*, <vscale x 4 x i16>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg2_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} 
@llvm.riscv.vlxseg2.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv4i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f16.nxv8i64(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv8i64(,, half*, , , i64) + +define @test_vlxseg2_nxv2f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv8i64( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f16.nxv1i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv1i8(,, half*, , , i64) + +define @test_vlxseg2_nxv2f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv1i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f16.nxv2i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i64) + +define @test_vlxseg2_nxv2f16_nxv2i8(half* 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv2i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f16.nxv8i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv8i32(,, half*, , , i64) + +define @test_vlxseg2_nxv2f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv8i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f16.nxv32i8(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv32i8(,, half*, , , i64) + +define @test_vlxseg2_nxv2f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} 
@llvm.riscv.vlxseg2.mask.nxv2f16.nxv32i8( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f16.nxv16i32(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv16i32(,, half*, , , i64) + +define @test_vlxseg2_nxv2f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv16i32( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f16.nxv2i16(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i64) + +define @test_vlxseg2_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv2i16( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv2f16.nxv2i64(half*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv2i64(,, half*, , , i64) + +define @test_vlxseg2_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv2f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), 
v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg2ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv2f16.nxv2i64( %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv16i16(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv16i16(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv16i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv16i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv32i16(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv32i16(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv32i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv32i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv4i32(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv4i32(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 
killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv4i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv16i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv16i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv16i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv1i64(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv1i64(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv1i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) 
+ %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv1i32(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv1i32(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv1i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv8i16(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv8i16(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv8i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv4i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv4i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 
+; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv4i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv1i16(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv1i16(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv1i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv2i32(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv2i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv8i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv8i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def 
$v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv8i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv4i64(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv4i64(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv4i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv64i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv64i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv64i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + 
%3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv4i16(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv4i16(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv4i16( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv8i64(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv8i64(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv8i64( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv1i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv1i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 
+; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv1i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv2i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv2i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv8i32(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv8i32(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv8i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv32i8(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv32i8(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 
killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv32i8( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv16i32(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv16i32(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv16i32( %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv2i16(half*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i64) + +define @test_vlxseg3_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv2f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv2i16( %1, %1, %1, half* %base, %index, %mask, 
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg3.nxv2f16.nxv2i64(half*, <vscale x 2 x i64>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg3_nxv2f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv2f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg3.nxv2f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg3_mask_nxv2f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv2f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg3.nxv2f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg4.nxv2f16.nxv16i16(half*, <vscale x 16 x i16>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg4_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg4.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg4_mask_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg4.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg4.nxv2f16.nxv32i16(half*, <vscale x 32 x i16>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg4_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg4.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg4_mask_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv32i16:
+; CHECK:       # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv32i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv4i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv4i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv4i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv16i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv16i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv16i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv1i64(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv1i64(,,,, half*, , , i64) + +define 
@test_vlxseg4_nxv2f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv1i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv1i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv1i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv1i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv8i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv8i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v 
v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv8i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv4i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv4i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv4i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv1i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv1i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv1i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv2i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: 
vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv2i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv8i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv8i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv8i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv4i64(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv4i64(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} 
@llvm.riscv.vlxseg4.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv4i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv64i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv64i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv64i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv4i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv4i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv4i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv8i64(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv8i64(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} 
@llvm.riscv.vlxseg4.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv8i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv1i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv1i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv1i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv2i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv2i8( %1, %1, %1, %1, half* 
%base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv8i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv8i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv8i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv32i8(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv32i8(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv32i8( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv16i32(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv16i32(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg4_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv16i32( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv2i16(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv2i16( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv2i64(half*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv2i64(,,,, half*, , , i64) + +define @test_vlxseg4_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv2f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv2f16.nxv2i64( %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv16i16(half*, , i64) +declare {,,,,} 
@llvm.riscv.vlxseg5.mask.nxv2f16.nxv16i16(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv16i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv16i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv32i16(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv32i16(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv32i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv32i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv4i32(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv4i32(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv4i32: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv4i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv16i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv16i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv16i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv1i64(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv1i64(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv1i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} 
@llvm.riscv.vlxseg5.nxv2f16.nxv1i32(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv1i32(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv1i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv8i16(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv8i16(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv8i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv4i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv4i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg5_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv4i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv1i16(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv1i16(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv1i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv2i32(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv2i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 
1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv8i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv8i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv8i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv4i64(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv4i64(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv4i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv64i8(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv64i8(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv64i8(half* %base, %index, i64 %vl, 
%mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv64i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv4i16(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv4i16(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv4i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,} %2, 1 + ret %3 +} + +declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv8i64(half*, , i64) +declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv8i64(,,,,, half*, , , i64) + +define @test_vlxseg5_nxv2f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg5_mask_nxv2f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,} %0, 0 + %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv8i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + 
%3 = extractvalue {,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv1i8(half*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv1i8(,,,,, half*, , , i64)
+
+define @test_vlxseg5_nxv2f16_nxv1i8(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv1i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg5_mask_nxv2f16_nxv1i8(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv1i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 0
+ %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv1i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv2i8(half*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i64)
+
+define @test_vlxseg5_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv2i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg5_mask_nxv2f16_nxv2i8(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv2i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 0
+ %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv2i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv8i32(half*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv8i32(,,,,, half*, , , i64)
+
+define @test_vlxseg5_nxv2f16_nxv8i32(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv8i32(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg5_mask_nxv2f16_nxv8i32(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv8i32(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 0
+ %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv8i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv32i8(half*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv32i8(,,,,, half*, , , i64)
+
+define @test_vlxseg5_nxv2f16_nxv32i8(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg5ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv32i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg5_mask_nxv2f16_nxv32i8(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlxseg5ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv32i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 0
+ %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv32i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv16i32(half*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv16i32(,,,,, half*, , , i64)
+
+define @test_vlxseg5_nxv2f16_nxv16i32(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg5ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv16i32(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg5_mask_nxv2f16_nxv16i32(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlxseg5ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv16i32(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 0
+ %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv16i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv2i16(half*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i64)
+
+define @test_vlxseg5_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg5ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv2i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg5_mask_nxv2f16_nxv2i16(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlxseg5ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv2i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 0
+ %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv2i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv2i64(half*, , i64)
+declare {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv2i64(,,,,, half*, , , i64)
+
+define @test_vlxseg5_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg5_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg5ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv2i64(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg5_mask_nxv2f16_nxv2i64(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg5_mask_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlxseg5ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,} @llvm.riscv.vlxseg5.nxv2f16.nxv2i64(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,} %0, 0
+ %2 = tail call {,,,,} @llvm.riscv.vlxseg5.mask.nxv2f16.nxv2i64( %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv16i16(half*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv16i16(,,,,,, half*, , , i64)
+
+define @test_vlxseg6_nxv2f16_nxv16i16(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv16i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg6_mask_nxv2f16_nxv16i16(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv16i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 0
+ %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv16i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv32i16(half*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv32i16(,,,,,, half*, , , i64)
+
+define @test_vlxseg6_nxv2f16_nxv32i16(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv32i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg6_mask_nxv2f16_nxv32i16(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv32i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 0
+ %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv32i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv4i32(half*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv4i32(,,,,,, half*, , , i64)
+
+define @test_vlxseg6_nxv2f16_nxv4i32(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv4i32(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg6_mask_nxv2f16_nxv4i32(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv4i32(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 0
+ %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv4i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv16i8(half*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv16i8(,,,,,, half*, , , i64)
+
+define @test_vlxseg6_nxv2f16_nxv16i8(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv16i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg6_mask_nxv2f16_nxv16i8(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv16i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 0
+ %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv16i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv1i64(half*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv1i64(,,,,,, half*, , , i64)
+
+define @test_vlxseg6_nxv2f16_nxv1i64(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv1i64(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg6_mask_nxv2f16_nxv1i64(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv1i64(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 0
+ %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv1i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv1i32(half*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv1i32(,,,,,, half*, , , i64)
+
+define @test_vlxseg6_nxv2f16_nxv1i32(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv1i32(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg6_mask_nxv2f16_nxv1i32(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv1i32(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 0
+ %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv1i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv8i16(half*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv8i16(,,,,,, half*, , , i64)
+
+define @test_vlxseg6_nxv2f16_nxv8i16(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv8i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg6_mask_nxv2f16_nxv8i16(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT: vmv1r.v v16, v2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv8i16(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 0
+ %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv8i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl)
+ %3 = extractvalue {,,,,,} %2, 1
+ ret %3
+}
+
+declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv4i8(half*, , i64)
+declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv4i8(,,,,,, half*, , , i64)
+
+define @test_vlxseg6_nxv2f16_nxv4i8(half* %base, %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv4i8(half* %base, %index, i64 %vl)
+ %1 = extractvalue {,,,,,} %0, 1
+ ret %1
+}
+
+define @test_vlxseg6_mask_nxv2f16_nxv4i8(half* %base, %index, i64 %vl, %mask) {
+; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv4i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv1i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv1i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv2f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv1i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv2i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv2i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv8i8(half*, , 
i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv8i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv2f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv8i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv4i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv4i64(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv2f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv4i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv64i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv64i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv2f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f16_nxv64i8(half* 
%base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv64i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv4i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv4i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv2f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv4i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv8i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv8i64(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv2f16_nxv8i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f16_nxv8i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv8i64(half* %base, %index, i64 %vl) + %1 = 
extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv8i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv1i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv1i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv2f16_nxv1i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f16_nxv1i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv1i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv1i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv2i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f16_nxv2i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv2i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv2i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv8i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv8i32(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv2f16_nxv8i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed 
$v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f16_nxv8i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv8i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv8i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv32i8(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv32i8(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv2f16_nxv32i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f16_nxv32i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv32i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv32i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv16i32(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv16i32(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv2f16_nxv16i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f16_nxv16i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v 
v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv16i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv16i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv2i16(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f16_nxv2i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv2i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv2i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv2i64(half*, , i64) +declare {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv2i64(,,,,,, half*, , , i64) + +define @test_vlxseg6_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg6_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg6_mask_nxv2f16_nxv2i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg6_mask_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg6ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,} @llvm.riscv.vlxseg6.nxv2f16.nxv2i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,} %0, 0 + %2 = tail call {,,,,,} @llvm.riscv.vlxseg6.mask.nxv2f16.nxv2i64( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv16i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv16i16(,,,,,,, 
half*, , , i64) + +define @test_vlxseg7_nxv2f16_nxv16i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f16_nxv16i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv16i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv32i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv32i16(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv2f16_nxv32i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f16_nxv32i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv32i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv4i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv4i32(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv2f16_nxv4i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret 
%1 +} + +define @test_vlxseg7_mask_nxv2f16_nxv4i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv4i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv16i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv16i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv2f16_nxv16i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f16_nxv16i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv16i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv1i64(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv1i64(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv2f16_nxv1i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f16_nxv1i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: 
vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv1i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv1i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv1i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv1i32(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv2f16_nxv1i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f16_nxv1i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv1i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv8i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv8i16(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv2f16_nxv8i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f16_nxv8i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv8i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv4i8(half*, , i64) +declare {,,,,,,} 
@llvm.riscv.vlxseg7.mask.nxv2f16.nxv4i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv2f16_nxv4i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f16_nxv4i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv4i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv1i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv1i16(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv2f16_nxv1i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f16_nxv1i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv1i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv2i32(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = 
extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f16_nxv2i32(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei32.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv2i32(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv8i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv8i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv2f16_nxv8i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f16_nxv8i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv8i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv4i64(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv4i64(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv2f16_nxv4i64(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f16_nxv4i64(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; 
CHECK-NEXT: vlxseg7ei64.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv4i64(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv4i64( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv64i8(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv64i8(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv2f16_nxv64i8(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f16_nxv64i8(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei8.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv64i8(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv4i16(half*, , i64) +declare {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv4i16(,,,,,,, half*, , , i64) + +define @test_vlxseg7_nxv2f16_nxv4i16(half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v15, (a0), v16 +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 +} + +define @test_vlxseg7_mask_nxv2f16_nxv4i16(half* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu +; CHECK-NEXT: vlxseg7ei16.v v1, (a0), v16, v0.t +; CHECK-NEXT: vmv1r.v v16, v2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv4i16(half* %base, %index, i64 %vl) + %1 = extractvalue {,,,,,,} %0, 0 + %2 = tail call {,,,,,,} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,,,,} %2, 1 + ret %3 +} + +declare {,,,,,,} @llvm.riscv.vlxseg7.nxv2f16.nxv8i64(half*, , i64) +declare {,,,,,,} 
+
+define <vscale x 2 x half> @test_vlxseg7_nxv2f16_nxv8i64(half* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv8i64(half* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg7_mask_nxv2f16_nxv8i64(half* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv8i64(half* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv8i64(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv1i8(half*, <vscale x 1 x i8>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg7_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg7_mask_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv2i8(half*, <vscale x 2 x i8>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg7_nxv2f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg7_mask_nxv2f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv8i32(half*, <vscale x 8 x i32>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg7_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg7_mask_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv32i8(half*, <vscale x 32 x i8>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg7_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg7_mask_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv16i32(half*, <vscale x 16 x i32>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg7_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg7_mask_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv2i16(half*, <vscale x 2 x i16>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg7_nxv2f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg7_mask_nxv2f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv2i64(half*, <vscale x 2 x i64>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg7_nxv2f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg7_nxv2f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg7_mask_nxv2f16_nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg7_mask_nxv2f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg7ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg7ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.nxv2f16.nxv2i64(half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg7.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv16i16(half*, <vscale x 16 x i16>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv32i16(half*, <vscale x 32 x i16>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv4i32(half*, <vscale x 4 x i32>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv16i8(half*, <vscale x 16 x i8>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv1i64(half*, <vscale x 1 x i64>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv1i64(half* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv1i64(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv1i32(half*, <vscale x 1 x i32>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv8i16(half*, <vscale x 8 x i16>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv4i8(half*, <vscale x 4 x i8>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv1i16(half*, <vscale x 1 x i16>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv2i32(half*, <vscale x 2 x i32>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv8i8(half*, <vscale x 8 x i8>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue
{<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv4i64(half*, <vscale x 4 x i64>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv4i64(half* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv4i64(half* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv4i64(half* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv4i64(half* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv4i64(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv64i8(half*, <vscale x 64 x i8>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv4i16(half*, <vscale x 4 x i16>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei16.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv8i64(half*, <vscale x 8 x i64>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv8i64(half* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv8i64(half* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv8i64(half* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei64.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv8i64(half* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv8i64(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv1i8(half*, <vscale x 1 x i8>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv2i8(half*, <vscale x 2 x i8>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv8i32(half*, <vscale x 8 x i32>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei32.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv32i8(half*, <vscale x 32 x i8>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlxseg8_mask_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg8_mask_nxv2f16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vlxseg8ei8.v v1, (a0), v16, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv16i32(half*, <vscale x 16 x i32>, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlxseg8_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg8_nxv2f16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vlxseg8ei32.v v15, (a0), v16
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlxseg8.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define
{,} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv1i32(,, float*, , , i64) + +define @test_vlxseg2_nxv4f32_nxv1i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv1i32( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f32.nxv8i16(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv8i16(,, float*, , , i64) + +define @test_vlxseg2_nxv4f32_nxv8i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 0 + %2 = tail call {,} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv8i16( %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,} %2, 1 + ret %3 +} + +declare {,} @llvm.riscv.vlxseg2.nxv4f32.nxv4i8(float*, , i64) +declare {,} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i64) + +define @test_vlxseg2_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,} @llvm.riscv.vlxseg2.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 +} + +define @test_vlxseg2_mask_nxv4f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg2ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 
= tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv1i16(float*, <vscale x 1 x i16>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv1i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg2_nxv4f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg2_mask_nxv4f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv1i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv2i32(float*, <vscale x 2 x i32>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv2i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg2_nxv4f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg2_mask_nxv4f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv2i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv8i8(float*, <vscale x 8 x i8>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv8i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg2_nxv4f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg2_mask_nxv4f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv8i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv4i64(float*, <vscale x 4 x i64>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv4i64(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg2_nxv4f32_nxv4i64(float* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv4i64(float* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg2_mask_nxv4f32_nxv4i64(float* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv4i64(float* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv64i8(float*, <vscale x 64 x i8>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv64i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg2_nxv4f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg2_mask_nxv4f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv64i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv4i16(float*, <vscale x 4 x i16>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg2_nxv4f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg2_mask_nxv4f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv8i64(float*, <vscale x 8 x i64>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv8i64(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg2_nxv4f32_nxv8i64(float* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv8i64(float* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg2_mask_nxv4f32_nxv8i64(float* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv8i64(float* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv8i64(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv1i8(float*, <vscale x 1 x i8>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv1i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg2_nxv4f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg2_mask_nxv4f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv1i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv2i8(float*, <vscale x 2 x i8>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv2i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg2_nxv4f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg2_mask_nxv4f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv2i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv8i32(float*, <vscale x 8 x i32>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv8i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg2_nxv4f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg2_mask_nxv4f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv8i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv32i8(float*, <vscale x 32 x i8>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv32i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg2_nxv4f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg2_mask_nxv4f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv32i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv16i32(float*, <vscale x 16 x i32>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv16i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg2_nxv4f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg2_mask_nxv4f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv16i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv2i16(float*, <vscale x 2 x i16>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv2i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg2_nxv4f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg2_mask_nxv4f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv2i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv2i64(float*, <vscale x 2 x i64>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv2i64(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg2_nxv4f32_nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg2_nxv4f32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg2_mask_nxv4f32_nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg2_mask_nxv4f32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg2ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.nxv4f32.nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg2.mask.nxv4f32.nxv2i64(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv16i16(float*, <vscale x 16 x i16>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv16i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg3_nxv4f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg3_mask_nxv4f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv16i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv32i16(float*, <vscale x 32 x i16>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv32i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg3_nxv4f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg3_mask_nxv4f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv32i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv4i32(float*, <vscale x 4 x i32>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg3_nxv4f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg3_mask_nxv4f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv16i8(float*, <vscale x 16 x i8>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv16i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg3_nxv4f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg3_mask_nxv4f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv16i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv1i64(float*, <vscale x 1 x i64>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv1i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg3_nxv4f32_nxv1i64(float* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv1i64(float* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg3_mask_nxv4f32_nxv1i64(float* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv1i64(float* %base, <vscale x 1 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv1i64(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv1i32(float*, <vscale x 1 x i32>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv1i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg3_nxv4f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg3_mask_nxv4f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv1i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv8i16(float*, <vscale x 8 x i16>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv8i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg3_nxv4f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg3_mask_nxv4f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg3ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv8i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.nxv4f32.nxv4i8(float*, <vscale x 4 x i8>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg3_nxv4f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg3ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} 
@llvm.riscv.vlxseg3.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv4i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv1i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv1i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv4f32_nxv1i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv1i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv2i32(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv2i32(,,, float*, , , i64) + +define @test_vlxseg3_nxv4f32_nxv2i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv2i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv2i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv2i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare 
{,,} @llvm.riscv.vlxseg3.nxv4f32.nxv8i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv8i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv4f32_nxv8i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv8i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv8i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv8i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv4i64(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv4i64(,,, float*, , , i64) + +define @test_vlxseg3_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv4i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv4i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv64i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv64i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv4f32_nxv64i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv64i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: 
vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv64i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv64i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv4i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv4i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv4i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv8i64(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv8i64(,,, float*, , , i64) + +define @test_vlxseg3_nxv4f32_nxv8i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv8i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv8i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv8i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv1i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv1i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv4f32_nxv1i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; 
CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv1i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv1i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv1i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv2i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv2i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv4f32_nxv2i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv2i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv2i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv2i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv8i32(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv8i32(,,, float*, , , i64) + +define @test_vlxseg3_nxv4f32_nxv8i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv8i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv8i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} 
@llvm.riscv.vlxseg3.mask.nxv4f32.nxv8i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv32i8(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv32i8(,,, float*, , , i64) + +define @test_vlxseg3_nxv4f32_nxv32i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv32i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv32i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv32i8( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv16i32(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv16i32(,,, float*, , , i64) + +define @test_vlxseg3_nxv4f32_nxv16i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv16i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv16i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv16i32( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv2i16(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv2i16(,,, float*, , , i64) + +define @test_vlxseg3_nxv4f32_nxv2i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv2i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vlxseg3_mask_nxv4f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv2i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv2i16( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv2i64(float*, , i64) +declare {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv2i64(,,, float*, , , i64) + +define @test_vlxseg3_nxv4f32_nxv2i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg3_nxv4f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 +} + +define @test_vlxseg3_mask_nxv4f32_nxv2i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg3_mask_nxv4f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg3ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,} @llvm.riscv.vlxseg3.nxv4f32.nxv2i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,} %0, 0 + %2 = tail call {,,} @llvm.riscv.vlxseg3.mask.nxv4f32.nxv2i64( %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv16i16(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv16i16(,,,, float*, , , i64) + +define @test_vlxseg4_nxv4f32_nxv16i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4f32_nxv16i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv16i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv16i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv32i16(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv32i16(,,,, float*, , , i64) + +define 
@test_vlxseg4_nxv4f32_nxv32i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4f32_nxv32i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv32i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv32i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv4i32(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i64) + +define @test_vlxseg4_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4f32_nxv4i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv4i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv16i8(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv16i8(,,,, float*, , , i64) + +define @test_vlxseg4_nxv4f32_nxv16i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4f32_nxv16i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; 
CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv16i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv16i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv1i64(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv1i64(,,,, float*, , , i64) + +define @test_vlxseg4_nxv4f32_nxv1i64(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4f32_nxv1i64(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei64.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv1i64(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv1i64( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv1i32(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv1i32(,,,, float*, , , i64) + +define @test_vlxseg4_nxv4f32_nxv1i32(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4f32_nxv1i32(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei32.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv1i32(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv1i32( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv8i16(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv8i16(,,,, float*, , , i64) + +define @test_vlxseg4_nxv4f32_nxv8i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv8i16: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4f32_nxv8i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv8i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv8i16( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv4i8(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i64) + +define @test_vlxseg4_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4f32_nxv4i8(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vlxseg4ei8.v v2, (a0), v16, v0.t +; CHECK-NEXT: vmv2r.v v16, v4 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 0 + %2 = tail call {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv4i8( %1, %1, %1, %1, float* %base, %index, %mask, i64 %vl) + %3 = extractvalue {,,,} %2, 1 + ret %3 +} + +declare {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv1i16(float*, , i64) +declare {,,,} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv1i16(,,,, float*, , , i64) + +define @test_vlxseg4_nxv4f32_nxv1i16(float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v14, (a0), v16 +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2 +; CHECK-NEXT: ret +entry: + %0 = tail call {,,,} @llvm.riscv.vlxseg4.nxv4f32.nxv1i16(float* %base, %index, i64 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 +} + +define @test_vlxseg4_mask_nxv4f32_nxv1i16(float* %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu +; CHECK-NEXT: vlxseg4ei16.v v2, (a0), v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: 
vlxseg4ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv1i16(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv2i32(float*, <vscale x 2 x i32>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv2i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg4_nxv4f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg4_mask_nxv4f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv2i32(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv8i8(float*, <vscale x 8 x i8>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv8i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg4_nxv4f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg4_mask_nxv4f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv8i8(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv4i64(float*, <vscale x 4 x i64>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv4i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg4_nxv4f32_nxv4i64(float* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv4i64(float* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg4_mask_nxv4f32_nxv4i64(float* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv4i64(float* %base, <vscale x 4 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv64i8(float*, <vscale x 64 x i8>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv64i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg4_nxv4f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg4_mask_nxv4f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv64i8(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv4i16(float*, <vscale x 4 x i16>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg4_nxv4f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg4_mask_nxv4f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv8i64(float*, <vscale x 8 x i64>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv8i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg4_nxv4f32_nxv8i64(float* %base, <vscale x 8 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv8i64(float* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg4_mask_nxv4f32_nxv8i64(float* %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv8i64(float* %base, <vscale x 8 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv8i64(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv1i8(float*, <vscale x 1 x i8>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv1i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg4_nxv4f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg4_mask_nxv4f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv1i8(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv2i8(float*, <vscale x 2 x i8>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv2i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg4_nxv4f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg4_mask_nxv4f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv2i8(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv8i32(float*, <vscale x 8 x i32>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv8i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg4_nxv4f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg4_mask_nxv4f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv8i32(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv32i8(float*, <vscale x 32 x i8>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv32i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg4_nxv4f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg4_mask_nxv4f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei8.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv32i8(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv16i32(float*, <vscale x 16 x i32>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv16i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg4_nxv4f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg4_mask_nxv4f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei32.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei32.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv16i32(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv2i16(float*, <vscale x 2 x i16>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv2i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg4_nxv4f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg4_mask_nxv4f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei16.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei16.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv2i16(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv2i64(float*, <vscale x 2 x i64>, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv2i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlxseg4_nxv4f32_nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vlxseg4_nxv4f32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v14, (a0), v16
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlxseg4_mask_nxv4f32_nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlxseg4_mask_nxv4f32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vlxseg4ei64.v v2, (a0), v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v8, v2
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vlxseg4ei64.v v2, (a0), v16, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.nxv4f32.nxv2i64(float* %base, <vscale x 2 x i64> %index, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlxseg4.mask.nxv4f32.nxv2i64(<vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, <vscale x 4 x float> %1, float* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+